1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4 * Copyright (C) 1992 Eric Youngdale
5 * Simulate a host adapter with 2 disks attached. Do a lot of checking
6 * to make sure that we are not getting blocks mixed up, and PANIC if
7 * anything out of the ordinary is seen.
8 * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
10 * Copyright (C) 2001 - 2020 Douglas Gilbert
12 * For documentation see http://sg.danny.cz/sg/scsi_debug.html
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
18 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/genhd.h>
28 #include <linux/init.h>
29 #include <linux/proc_fs.h>
30 #include <linux/vmalloc.h>
31 #include <linux/moduleparam.h>
32 #include <linux/scatterlist.h>
33 #include <linux/blkdev.h>
34 #include <linux/crc-t10dif.h>
35 #include <linux/spinlock.h>
36 #include <linux/interrupt.h>
37 #include <linux/atomic.h>
38 #include <linux/hrtimer.h>
39 #include <linux/uuid.h>
40 #include <linux/t10-pi.h>
41 #include <linux/msdos_partition.h>
42 #include <linux/random.h>
43 #include <linux/xarray.h>
44 #include <linux/prefetch.h>
46 #include <net/checksum.h>
48 #include <asm/unaligned.h>
50 #include <scsi/scsi.h>
51 #include <scsi/scsi_cmnd.h>
52 #include <scsi/scsi_device.h>
53 #include <scsi/scsi_host.h>
54 #include <scsi/scsicam.h>
55 #include <scsi/scsi_eh.h>
56 #include <scsi/scsi_tcq.h>
57 #include <scsi/scsi_dbg.h>
60 #include "scsi_logging.h"
62 /* make sure inq_product_rev string corresponds to this version */
63 #define SDEBUG_VERSION "0190" /* format to fit INQUIRY revision field */
64 static const char *sdebug_version_date = "20200710";
66 #define MY_NAME "scsi_debug"
68 /* Additional Sense Code (ASC) */
69 #define NO_ADDITIONAL_SENSE 0x0
70 #define LOGICAL_UNIT_NOT_READY 0x4
71 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
72 #define UNRECOVERED_READ_ERR 0x11
73 #define PARAMETER_LIST_LENGTH_ERR 0x1a
74 #define INVALID_OPCODE 0x20
75 #define LBA_OUT_OF_RANGE 0x21
76 #define INVALID_FIELD_IN_CDB 0x24
77 #define INVALID_FIELD_IN_PARAM_LIST 0x26
78 #define WRITE_PROTECTED 0x27
79 #define UA_RESET_ASC 0x29
80 #define UA_CHANGED_ASC 0x2a
81 #define TARGET_CHANGED_ASC 0x3f
82 #define LUNS_CHANGED_ASCQ 0x0e
83 #define INSUFF_RES_ASC 0x55
84 #define INSUFF_RES_ASCQ 0x3
85 #define POWER_ON_RESET_ASCQ 0x0
86 #define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
87 #define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */
88 #define CAPACITY_CHANGED_ASCQ 0x9
89 #define SAVING_PARAMS_UNSUP 0x39
90 #define TRANSPORT_PROBLEM 0x4b
91 #define THRESHOLD_EXCEEDED 0x5d
92 #define LOW_POWER_COND_ON 0x5e
93 #define MISCOMPARE_VERIFY_ASC 0x1d
94 #define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
95 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
96 #define WRITE_ERROR_ASC 0xc
97 #define UNALIGNED_WRITE_ASCQ 0x4
98 #define WRITE_BOUNDARY_ASCQ 0x5
99 #define READ_INVDATA_ASCQ 0x6
100 #define READ_BOUNDARY_ASCQ 0x7
101 #define INSUFF_ZONE_ASCQ 0xe
103 /* Additional Sense Code Qualifier (ASCQ) */
104 #define ACK_NAK_TO 0x3
106 /* Default values for driver parameters */
107 #define DEF_NUM_HOST 1
108 #define DEF_NUM_TGTS 1
109 #define DEF_MAX_LUNS 1
110 /* With these defaults, this driver will make 1 host with 1 target
111 * (id 0) containing 1 logical unit (lun 0). That is 1 device.
114 #define DEF_CDB_LEN 10
115 #define DEF_JDELAY 1 /* if > 0 unit is a jiffy */
116 #define DEF_DEV_SIZE_PRE_INIT 0
117 #define DEF_DEV_SIZE_MB 8
118 #define DEF_ZBC_DEV_SIZE_MB 128
121 #define DEF_PER_HOST_STORE false
122 #define DEF_D_SENSE 0
123 #define DEF_EVERY_NTH 0
124 #define DEF_FAKE_RW 0
126 #define DEF_HOST_LOCK 0
129 #define DEF_LBPWS10 0
131 #define DEF_LOWEST_ALIGNED 0
132 #define DEF_NDELAY 0 /* if > 0 unit is a nanosecond */
133 #define DEF_NO_LUN_0 0
134 #define DEF_NUM_PARTS 0
136 #define DEF_OPT_BLKS 1024
137 #define DEF_PHYSBLK_EXP 0
138 #define DEF_OPT_XFERLEN_EXP 0
139 #define DEF_PTYPE TYPE_DISK
140 #define DEF_RANDOM false
141 #define DEF_REMOVABLE false
142 #define DEF_SCSI_LEVEL 7 /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
143 #define DEF_SECTOR_SIZE 512
144 #define DEF_UNMAP_ALIGNMENT 0
145 #define DEF_UNMAP_GRANULARITY 1
146 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
147 #define DEF_UNMAP_MAX_DESC 256
148 #define DEF_VIRTUAL_GB 0
149 #define DEF_VPD_USE_HOSTNO 1
150 #define DEF_WRITESAME_LENGTH 0xFFFF
152 #define DEF_STATISTICS false
153 #define DEF_SUBMIT_QUEUES 1
154 #define DEF_TUR_MS_TO_READY 0
155 #define DEF_UUID_CTL 0
156 #define JDELAY_OVERRIDDEN -9999
158 /* Default parameters for ZBC drives */
159 #define DEF_ZBC_ZONE_SIZE_MB 128
160 #define DEF_ZBC_MAX_OPEN_ZONES 8
161 #define DEF_ZBC_NR_CONV_ZONES 1
163 #define SDEBUG_LUN_0_VAL 0
165 /* bit mask values for sdebug_opts */
166 #define SDEBUG_OPT_NOISE 1
167 #define SDEBUG_OPT_MEDIUM_ERR 2
168 #define SDEBUG_OPT_TIMEOUT 4
169 #define SDEBUG_OPT_RECOVERED_ERR 8
170 #define SDEBUG_OPT_TRANSPORT_ERR 16
171 #define SDEBUG_OPT_DIF_ERR 32
172 #define SDEBUG_OPT_DIX_ERR 64
173 #define SDEBUG_OPT_MAC_TIMEOUT 128
174 #define SDEBUG_OPT_SHORT_TRANSFER 0x100
175 #define SDEBUG_OPT_Q_NOISE 0x200
176 #define SDEBUG_OPT_ALL_TSF 0x400
177 #define SDEBUG_OPT_RARE_TSF 0x800
178 #define SDEBUG_OPT_N_WCE 0x1000
179 #define SDEBUG_OPT_RESET_NOISE 0x2000
180 #define SDEBUG_OPT_NO_CDB_NOISE 0x4000
181 #define SDEBUG_OPT_HOST_BUSY 0x8000
182 #define SDEBUG_OPT_CMD_ABORT 0x10000
183 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
184 SDEBUG_OPT_RESET_NOISE)
185 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
186 SDEBUG_OPT_TRANSPORT_ERR | \
187 SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
188 SDEBUG_OPT_SHORT_TRANSFER | \
189 SDEBUG_OPT_HOST_BUSY | \
190 SDEBUG_OPT_CMD_ABORT)
191 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
192 SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
194 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
195 * priority order. In the subset implemented here lower numbers have higher
196 * priority. The UA numbers should be a sequence starting from 0 with
197 * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
198 #define SDEBUG_UA_POR 0 /* Power on, reset, or bus device reset */
199 #define SDEBUG_UA_BUS_RESET 1
200 #define SDEBUG_UA_MODE_CHANGED 2
201 #define SDEBUG_UA_CAPACITY_CHANGED 3
202 #define SDEBUG_UA_LUNS_CHANGED 4
203 #define SDEBUG_UA_MICROCODE_CHANGED 5 /* simulate firmware change */
204 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 6
205 #define SDEBUG_NUM_UAS 7
207 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
208 * sector on read commands: */
209 #define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
210 #define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
212 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
213 * (for response) per submit queue at one time. Can be reduced by max_queue
214 * option. Command responses are not queued when jdelay=0 and ndelay=0. The
215 * per-device DEF_CMD_PER_LUN can be changed via sysfs:
216 * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
217 * but cannot exceed SDEBUG_CANQUEUE .
219 #define SDEBUG_CANQUEUE_WORDS 3 /* a WORD is bits in a long */
220 #define SDEBUG_CANQUEUE (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
221 #define DEF_CMD_PER_LUN SDEBUG_CANQUEUE
223 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
224 #define F_D_IN 1 /* Data-in command (e.g. READ) */
225 #define F_D_OUT 2 /* Data-out command (e.g. WRITE) */
226 #define F_D_OUT_MAYBE 4 /* WRITE SAME, NDOB bit */
228 #define F_RL_WLUN_OK 0x10 /* allowed with REPORT LUNS W-LUN */
229 #define F_SKIP_UA 0x20 /* bypass UAs (e.g. INQUIRY command) */
230 #define F_DELAY_OVERR 0x40 /* for commands like INQUIRY */
231 #define F_SA_LOW 0x80 /* SA is in cdb byte 1, bits 4 to 0 */
232 #define F_SA_HIGH 0x100 /* SA is in cdb bytes 8 and 9 */
233 #define F_INV_OP 0x200 /* invalid opcode (not supported) */
234 #define F_FAKE_RW 0x400 /* bypass resp_*() when fake_rw set */
235 #define F_M_ACCESS 0x800 /* media access, reacts to SSU state */
236 #define F_SSU_DELAY 0x1000 /* SSU command delay (long-ish) */
237 #define F_SYNC_DELAY 0x2000 /* SYNCHRONIZE CACHE delay */
239 /* Useful combinations of the above flags */
240 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
241 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
242 #define FF_SA (F_SA_HIGH | F_SA_LOW)
243 #define F_LONG_DELAY (F_SSU_DELAY | F_SYNC_DELAY)
245 #define SDEBUG_MAX_PARTS 4
247 #define SDEBUG_MAX_CMD_LEN 32
249 #define SDEB_XA_NOT_IN_USE XA_MARK_1
251 /* Zone types (zbcr05 table 25) */
253 ZBC_ZONE_TYPE_CNV = 0x1,
254 ZBC_ZONE_TYPE_SWR = 0x2,
255 ZBC_ZONE_TYPE_SWP = 0x3,
258 /* enumeration names taken from table 26, zbcr05 */
260 ZBC_NOT_WRITE_POINTER = 0x0,
262 ZC2_IMPLICIT_OPEN = 0x2,
263 ZC3_EXPLICIT_OPEN = 0x3,
270 struct sdeb_zone_state { /* ZBC: per zone state */
271 enum sdebug_z_type z_type;
272 enum sdebug_z_cond z_cond;
273 bool z_non_seq_resource;
279 struct sdebug_dev_info {
280 struct list_head dev_list;
281 unsigned int channel;
285 struct sdebug_host_info *sdbg_host;
286 unsigned long uas_bm[1];
288 atomic_t stopped; /* 1: by SSU, 2: device start */
291 /* For ZBC devices */
292 enum blk_zoned_model zmodel;
294 unsigned int zsize_shift;
295 unsigned int nr_zones;
296 unsigned int nr_conv_zones;
297 unsigned int nr_imp_open;
298 unsigned int nr_exp_open;
299 unsigned int nr_closed;
300 unsigned int max_open;
301 ktime_t create_ts; /* time since bootup that this device was created */
302 struct sdeb_zone_state *zstate;
305 struct sdebug_host_info {
306 struct list_head host_list;
307 int si_idx; /* sdeb_store_info (per host) xarray index */
308 struct Scsi_Host *shost;
310 struct list_head dev_info_list;
313 /* There is an xarray of pointers to this struct's objects, one per host */
314 struct sdeb_store_info {
315 rwlock_t macc_lck; /* for atomic media access on this store */
316 u8 *storep; /* user data storage (ram) */
317 struct t10_pi_tuple *dif_storep; /* protection info */
318 void *map_storep; /* provisioning map */
321 #define to_sdebug_host(d) \
322 container_of(d, struct sdebug_host_info, dev)
324 enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
325 SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
327 struct sdebug_defer {
329 struct execute_work ew;
330 ktime_t cmpl_ts;/* time since boot to complete this cmd */
331 int sqa_idx; /* index of sdebug_queue array */
332 int qc_idx; /* index of sdebug_queued_cmd array within sqa_idx */
333 int hc_idx; /* hostwide tag index */
338 bool aborted; /* true when blk_abort_request() already called */
339 enum sdeb_defer_type defer_t;
342 struct sdebug_queued_cmd {
343 /* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
344 * instance indicates this slot is in use.
346 struct sdebug_defer *sd_dp;
347 struct scsi_cmnd *a_cmnd;
350 struct sdebug_queue {
351 struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
352 unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
354 atomic_t blocked; /* to temporarily stop more being queued */
357 static atomic_t sdebug_cmnd_count; /* number of incoming commands */
358 static atomic_t sdebug_completions; /* count of deferred completions */
359 static atomic_t sdebug_miss_cpus; /* submission + completion cpus differ */
360 static atomic_t sdebug_a_tsf; /* 'almost task set full' counter */
361 static atomic_t sdeb_inject_pending;
362 static atomic_t sdeb_mq_poll_count; /* bumped when mq_poll returns > 0 */
364 struct opcode_info_t {
365 u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff */
366 /* for terminating element */
367 u8 opcode; /* if num_attached > 0, preferred */
368 u16 sa; /* service action */
369 u32 flags; /* OR-ed set of SDEB_F_* */
370 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
371 const struct opcode_info_t *arrp; /* num_attached elements or NULL */
372 u8 len_mask[16]; /* len_mask[0]-->cdb_len, then mask for cdb */
373 /* 1 to min(cdb_len, 15); ignore cdb[15...] */
376 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
377 enum sdeb_opcode_index {
378 SDEB_I_INVALID_OPCODE = 0,
380 SDEB_I_REPORT_LUNS = 2,
381 SDEB_I_REQUEST_SENSE = 3,
382 SDEB_I_TEST_UNIT_READY = 4,
383 SDEB_I_MODE_SENSE = 5, /* 6, 10 */
384 SDEB_I_MODE_SELECT = 6, /* 6, 10 */
385 SDEB_I_LOG_SENSE = 7,
386 SDEB_I_READ_CAPACITY = 8, /* 10; 16 is in SA_IN(16) */
387 SDEB_I_READ = 9, /* 6, 10, 12, 16 */
388 SDEB_I_WRITE = 10, /* 6, 10, 12, 16 */
389 SDEB_I_START_STOP = 11,
390 SDEB_I_SERV_ACT_IN_16 = 12, /* add ...SERV_ACT_IN_12 if needed */
391 SDEB_I_SERV_ACT_OUT_16 = 13, /* add ...SERV_ACT_OUT_12 if needed */
392 SDEB_I_MAINT_IN = 14,
393 SDEB_I_MAINT_OUT = 15,
394 SDEB_I_VERIFY = 16, /* VERIFY(10), VERIFY(16) */
395 SDEB_I_VARIABLE_LEN = 17, /* READ(32), WRITE(32), WR_SCAT(32) */
396 SDEB_I_RESERVE = 18, /* 6, 10 */
397 SDEB_I_RELEASE = 19, /* 6, 10 */
398 SDEB_I_ALLOW_REMOVAL = 20, /* PREVENT ALLOW MEDIUM REMOVAL */
399 SDEB_I_REZERO_UNIT = 21, /* REWIND in SSC */
400 SDEB_I_ATA_PT = 22, /* 12, 16 */
401 SDEB_I_SEND_DIAG = 23,
403 SDEB_I_WRITE_BUFFER = 25,
404 SDEB_I_WRITE_SAME = 26, /* 10, 16 */
405 SDEB_I_SYNC_CACHE = 27, /* 10, 16 */
406 SDEB_I_COMP_WRITE = 28,
407 SDEB_I_PRE_FETCH = 29, /* 10, 16 */
408 SDEB_I_ZONE_OUT = 30, /* 0x94+SA; includes no data xfer */
409 SDEB_I_ZONE_IN = 31, /* 0x95+SA; all have data-in */
410 SDEB_I_LAST_ELEM_P1 = 32, /* keep this last (previous + 1) */
414 static const unsigned char opcode_ind_arr[256] = {
415 /* 0x0; 0x0->0x1f: 6 byte cdbs */
416 SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
418 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
419 0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
421 0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
422 SDEB_I_ALLOW_REMOVAL, 0,
423 /* 0x20; 0x20->0x3f: 10 byte cdbs */
424 0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
425 SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
426 0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
427 0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
428 /* 0x40; 0x40->0x5f: 10 byte cdbs */
429 0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
430 0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
431 0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
433 0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
434 /* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
435 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
436 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
437 0, SDEB_I_VARIABLE_LEN,
438 /* 0x80; 0x80->0x9f: 16 byte cdbs */
439 0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
440 SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
441 0, 0, 0, SDEB_I_VERIFY,
442 SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
443 SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
444 0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
445 /* 0xa0; 0xa0->0xbf: 12 byte cdbs */
446 SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
447 SDEB_I_MAINT_OUT, 0, 0, 0,
448 SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
449 0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
450 0, 0, 0, 0, 0, 0, 0, 0,
451 0, 0, 0, 0, 0, 0, 0, 0,
452 /* 0xc0; 0xc0->0xff: vendor specific */
453 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
454 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
455 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
456 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
460 * The following "response" functions return the SCSI mid-level's 4 byte
461 * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
462 * command completion, they can mask their return value with
463 * SDEG_RES_IMMED_MASK .
465 #define SDEG_RES_IMMED_MASK 0x40000000
467 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
468 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
469 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
470 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
471 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
472 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
473 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
474 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
475 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
476 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
477 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
478 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
479 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
480 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
481 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
482 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
483 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
484 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
485 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
486 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
487 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
488 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
489 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
490 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
491 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
492 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
493 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
494 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
495 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
497 static int sdebug_do_add_host(bool mk_new_store);
498 static int sdebug_add_host_helper(int per_host_idx);
499 static void sdebug_do_remove_host(bool the_end);
500 static int sdebug_add_store(void);
501 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
502 static void sdebug_erase_all_stores(bool apart_from_first);
505 * The following are overflow arrays for cdbs that "hit" the same index in
506 * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
507 * should be placed in opcode_info_arr[], the others should be placed here.
509 static const struct opcode_info_t msense_iarr[] = {
510 {0, 0x1a, 0, F_D_IN, NULL, NULL,
511 {6, 0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
514 static const struct opcode_info_t mselect_iarr[] = {
515 {0, 0x15, 0, F_D_OUT, NULL, NULL,
516 {6, 0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
519 static const struct opcode_info_t read_iarr[] = {
520 {0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
521 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
523 {0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
524 {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
525 {0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
526 {12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
530 static const struct opcode_info_t write_iarr[] = {
531 {0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(10) */
532 NULL, {10, 0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
534 {0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(6) */
535 NULL, {6, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
537 {0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0, /* WRITE(12) */
538 NULL, {12, 0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
539 0xbf, 0xc7, 0, 0, 0, 0} },
542 static const struct opcode_info_t verify_iarr[] = {
543 {0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
544 NULL, {10, 0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
548 static const struct opcode_info_t sa_in_16_iarr[] = {
549 {0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
550 {16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
551 0xff, 0xff, 0xff, 0, 0xc7} }, /* GET LBA STATUS(16) */
554 static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */
555 {0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
556 NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
557 0, 0xff, 0xff, 0xff, 0xff} }, /* WRITE(32) */
558 {0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
559 NULL, {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
560 0, 0xff, 0xff, 0x0, 0x0} }, /* WRITE SCATTERED(32) */
563 static const struct opcode_info_t maint_in_iarr[] = { /* MAINT IN */
564 {0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
565 {12, 0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
566 0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
567 {0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
568 {12, 0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
569 0, 0} }, /* REPORTED SUPPORTED TASK MANAGEMENT FUNCTIONS */
572 static const struct opcode_info_t write_same_iarr[] = {
573 {0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
574 {16, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
575 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* WRITE SAME(16) */
578 static const struct opcode_info_t reserve_iarr[] = {
579 {0, 0x16, 0, F_D_OUT, NULL, NULL, /* RESERVE(6) */
580 {6, 0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
583 static const struct opcode_info_t release_iarr[] = {
584 {0, 0x17, 0, F_D_OUT, NULL, NULL, /* RELEASE(6) */
585 {6, 0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
588 static const struct opcode_info_t sync_cache_iarr[] = {
589 {0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
590 {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
591 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* SYNC_CACHE (16) */
594 static const struct opcode_info_t pre_fetch_iarr[] = {
595 {0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
596 {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
597 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* PRE-FETCH (16) */
600 static const struct opcode_info_t zone_out_iarr[] = { /* ZONE OUT(16) */
601 {0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
602 {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
603 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* CLOSE ZONE */
604 {0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
605 {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
606 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* FINISH ZONE */
607 {0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
608 {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
609 0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} }, /* RESET WRITE POINTER */
612 static const struct opcode_info_t zone_in_iarr[] = { /* ZONE IN(16) */
613 {0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
614 {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
615 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
619 /* This array is accessed via SDEB_I_* values. Make sure all are mapped,
620 * plus the terminating elements for logic that scans this table such as
621 * REPORT SUPPORTED OPERATION CODES. */
622 static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
624 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* unknown opcodes */
625 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
626 {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
627 {6, 0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
628 {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
629 {12, 0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
630 0, 0} }, /* REPORT LUNS */
631 {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL,
632 {6, 0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
633 {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
634 {6, 0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
636 {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN, /* MODE SENSE(10) */
637 resp_mode_sense, msense_iarr, {10, 0xf8, 0xff, 0xff, 0, 0, 0,
638 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
639 {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT, /* MODE SELECT(10) */
640 resp_mode_select, mselect_iarr, {10, 0xf1, 0, 0, 0, 0, 0, 0xff,
641 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
642 {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL, /* LOG SENSE */
643 {10, 0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
645 {0, 0x25, 0, F_D_IN, resp_readcap, NULL, /* READ CAPACITY(10) */
646 {10, 0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
648 {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
649 resp_read_dt0, read_iarr, {16, 0xfe, 0xff, 0xff, 0xff, 0xff,
650 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
652 {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
653 resp_write_dt0, write_iarr, /* WRITE(16) */
654 {16, 0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
655 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
656 {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
657 {6, 0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
658 {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
659 resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
660 {16, 0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
661 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
662 {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
663 NULL, {16, 0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
664 0xff, 0xff, 0xff, 0xff, 0xc7} }, /* SA_OUT(16), WRITE SCAT(16) */
665 {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
666 resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */
667 maint_in_iarr, {12, 0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
668 0xff, 0, 0xc7, 0, 0, 0, 0} },
670 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
671 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
672 {ARRAY_SIZE(verify_iarr), 0x8f, 0,
673 F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify, /* VERIFY(16) */
674 verify_iarr, {16, 0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
675 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
676 {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
677 resp_read_dt0, vl_iarr, /* VARIABLE LENGTH, READ(32) */
678 {32, 0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
680 {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
681 NULL, reserve_iarr, /* RESERVE(10) <no response function> */
682 {10, 0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
684 {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
685 NULL, release_iarr, /* RELEASE(10) <no response function> */
686 {10, 0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
689 {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
690 {6, 0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
691 {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
692 {6, 0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
693 {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
694 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
695 {0, 0x1d, F_D_OUT, 0, NULL, NULL, /* SEND DIAGNOSTIC */
696 {6, 0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
697 {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
698 {10, 0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
700 {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
701 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
702 0, 0, 0, 0} }, /* WRITE_BUFFER */
703 {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
704 resp_write_same_10, write_same_iarr, /* WRITE SAME(10) */
705 {10, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
707 {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
708 resp_sync_cache, sync_cache_iarr,
709 {10, 0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
710 0, 0, 0, 0} }, /* SYNC_CACHE (10) */
711 {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
712 {16, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
713 0, 0xff, 0x3f, 0xc7} }, /* COMPARE AND WRITE */
714 {ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
715 resp_pre_fetch, pre_fetch_iarr,
716 {10, 0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
717 0, 0, 0, 0} }, /* PRE-FETCH (10) */
720 {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
721 resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
722 {16, 0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
723 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
724 {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
725 resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
726 {16, 0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
727 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
729 {0xff, 0, 0, 0, NULL, NULL, /* terminating element */
730 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
733 static int sdebug_num_hosts;
734 static int sdebug_add_host = DEF_NUM_HOST; /* in sysfs this is relative */
735 static int sdebug_ato = DEF_ATO;
736 static int sdebug_cdb_len = DEF_CDB_LEN;
737 static int sdebug_jdelay = DEF_JDELAY; /* if > 0 then unit is jiffies */
738 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
739 static int sdebug_dif = DEF_DIF;
740 static int sdebug_dix = DEF_DIX;
741 static int sdebug_dsense = DEF_D_SENSE;
742 static int sdebug_every_nth = DEF_EVERY_NTH;
743 static int sdebug_fake_rw = DEF_FAKE_RW;
744 static unsigned int sdebug_guard = DEF_GUARD;
745 static int sdebug_host_max_queue; /* per host */
746 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
747 static int sdebug_max_luns = DEF_MAX_LUNS;
748 static int sdebug_max_queue = SDEBUG_CANQUEUE; /* per submit queue */
749 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
750 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
751 static atomic_t retired_max_queue; /* if > 0 then was prior max_queue */
752 static int sdebug_ndelay = DEF_NDELAY; /* if > 0 then unit is nanoseconds */
753 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
754 static int sdebug_no_uld;
755 static int sdebug_num_parts = DEF_NUM_PARTS;
756 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
757 static int sdebug_opt_blks = DEF_OPT_BLKS;
758 static int sdebug_opts = DEF_OPTS;
759 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
760 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
761 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
762 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
763 static int sdebug_sector_size = DEF_SECTOR_SIZE;
764 static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
765 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
766 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
767 static unsigned int sdebug_lbpu = DEF_LBPU;
768 static unsigned int sdebug_lbpws = DEF_LBPWS;
769 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
770 static unsigned int sdebug_lbprz = DEF_LBPRZ;
771 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
772 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
773 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
774 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
775 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
776 static int sdebug_uuid_ctl = DEF_UUID_CTL;
777 static bool sdebug_random = DEF_RANDOM;
778 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
779 static bool sdebug_removable = DEF_REMOVABLE;
780 static bool sdebug_clustering;
781 static bool sdebug_host_lock = DEF_HOST_LOCK;
782 static bool sdebug_strict = DEF_STRICT;
783 static bool sdebug_any_injecting_opt;
784 static bool sdebug_verbose;
785 static bool have_dif_prot;
786 static bool write_since_sync;
787 static bool sdebug_statistics = DEF_STATISTICS;
788 static bool sdebug_wp;
789 /* Following enum: 0: no zbc, def; 1: host aware; 2: host managed */
790 static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
791 static char *sdeb_zbc_model_s;
793 enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
794 SAM_LUN_AM_FLAT = 0x1,
795 SAM_LUN_AM_LOGICAL_UNIT = 0x2,
796 SAM_LUN_AM_EXTENDED = 0x3};
797 static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
798 static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
800 static unsigned int sdebug_store_sectors;
801 static sector_t sdebug_capacity; /* in sectors */
803 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
804 may still need them */
805 static int sdebug_heads; /* heads per disk */
806 static int sdebug_cylinders_per; /* cylinders per surface */
807 static int sdebug_sectors_per; /* sectors per cylinder */
809 static LIST_HEAD(sdebug_host_list);
810 static DEFINE_SPINLOCK(sdebug_host_list_lock);
812 static struct xarray per_store_arr;
813 static struct xarray *per_store_ap = &per_store_arr;
814 static int sdeb_first_idx = -1; /* invalid index ==> none created */
815 static int sdeb_most_recent_idx = -1;
816 static DEFINE_RWLOCK(sdeb_fake_rw_lck); /* need a RW lock when fake_rw=1 */
818 static unsigned long map_size;
819 static int num_aborts;
820 static int num_dev_resets;
821 static int num_target_resets;
822 static int num_bus_resets;
823 static int num_host_resets;
824 static int dix_writes;
825 static int dix_reads;
826 static int dif_errors;
828 /* ZBC global data */
829 static bool sdeb_zbc_in_use; /* true for host-aware and host-managed disks */
830 static int sdeb_zbc_zone_size_mb;
831 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
832 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
834 static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */
835 static int poll_queues; /* iouring iopoll interface.*/
836 static struct sdebug_queue *sdebug_q_arr; /* ptr to array of submit queues */
838 static DEFINE_RWLOCK(atomic_rw);
839 static DEFINE_RWLOCK(atomic_rw2);
841 static rwlock_t *ramdisk_lck_a[2];
843 static char sdebug_proc_name[] = MY_NAME;
844 static const char *my_name = MY_NAME;
846 static struct bus_type pseudo_lld_bus;
848 static struct device_driver sdebug_driverfs_driver = {
849 .name = sdebug_proc_name,
850 .bus = &pseudo_lld_bus,
853 static const int check_condition_result =
854 SAM_STAT_CHECK_CONDITION;
856 static const int illegal_condition_result =
857 (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
859 static const int device_qfull_result =
860 (DID_OK << 16) | SAM_STAT_TASK_SET_FULL;
862 static const int condition_met_result = SAM_STAT_CONDITION_MET;
865 /* Only do the extra work involved in logical block provisioning if one or
866 * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
867 * real reads and writes (i.e. not skipping them for speed).
869 static inline bool scsi_debug_lbp(void)
871 return 0 == sdebug_fake_rw &&
872 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
875 static void *lba2fake_store(struct sdeb_store_info *sip,
876 unsigned long long lba)
878 struct sdeb_store_info *lsip = sip;
880 lba = do_div(lba, sdebug_store_sectors);
881 if (!sip || !sip->storep) {
883 lsip = xa_load(per_store_ap, 0); /* should never be NULL */
885 return lsip->storep + lba * sdebug_sector_size;
888 static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
891 sector = sector_div(sector, sdebug_store_sectors);
893 return sip->dif_storep + sector;
896 static void sdebug_max_tgts_luns(void)
898 struct sdebug_host_info *sdbg_host;
899 struct Scsi_Host *hpnt;
901 spin_lock(&sdebug_host_list_lock);
902 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
903 hpnt = sdbg_host->shost;
904 if ((hpnt->this_id >= 0) &&
905 (sdebug_num_tgts > hpnt->this_id))
906 hpnt->max_id = sdebug_num_tgts + 1;
908 hpnt->max_id = sdebug_num_tgts;
909 /* sdebug_max_luns; */
910 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
912 spin_unlock(&sdebug_host_list_lock);
915 enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
917 /* Set in_bit to -1 to indicate no bit position of invalid field */
918 static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
919 enum sdeb_cmd_data c_d,
920 int in_byte, int in_bit)
922 unsigned char *sbuff;
926 sbuff = scp->sense_buffer;
928 sdev_printk(KERN_ERR, scp->device,
929 "%s: sense_buffer is NULL\n", __func__);
932 asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
933 memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
934 scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
935 memset(sks, 0, sizeof(sks));
941 sks[0] |= 0x7 & in_bit;
943 put_unaligned_be16(in_byte, sks + 1);
949 memcpy(sbuff + sl + 4, sks, 3);
951 memcpy(sbuff + 15, sks, 3);
953 sdev_printk(KERN_INFO, scp->device, "%s: [sense_key,asc,ascq"
954 "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
955 my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
958 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
960 if (!scp->sense_buffer) {
961 sdev_printk(KERN_ERR, scp->device,
962 "%s: sense_buffer is NULL\n", __func__);
965 memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
967 scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
970 sdev_printk(KERN_INFO, scp->device,
971 "%s: [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
972 my_name, key, asc, asq);
975 static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
977 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
980 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
983 if (sdebug_verbose) {
985 sdev_printk(KERN_INFO, dev,
986 "%s: BLKFLSBUF [0x1261]\n", __func__);
987 else if (0x5331 == cmd)
988 sdev_printk(KERN_INFO, dev,
989 "%s: CDROM_GET_CAPABILITY [0x5331]\n",
992 sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
996 /* return -ENOTTY; // correct return but upsets fdisk */
999 static void config_cdb_len(struct scsi_device *sdev)
1001 switch (sdebug_cdb_len) {
1002 case 6: /* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1003 sdev->use_10_for_rw = false;
1004 sdev->use_16_for_rw = false;
1005 sdev->use_10_for_ms = false;
1007 case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1008 sdev->use_10_for_rw = true;
1009 sdev->use_16_for_rw = false;
1010 sdev->use_10_for_ms = false;
1012 case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1013 sdev->use_10_for_rw = true;
1014 sdev->use_16_for_rw = false;
1015 sdev->use_10_for_ms = true;
1018 sdev->use_10_for_rw = false;
1019 sdev->use_16_for_rw = true;
1020 sdev->use_10_for_ms = true;
1022 case 32: /* No knobs to suggest this so same as 16 for now */
1023 sdev->use_10_for_rw = false;
1024 sdev->use_16_for_rw = true;
1025 sdev->use_10_for_ms = true;
1028 pr_warn("unexpected cdb_len=%d, force to 10\n",
1030 sdev->use_10_for_rw = true;
1031 sdev->use_16_for_rw = false;
1032 sdev->use_10_for_ms = false;
1033 sdebug_cdb_len = 10;
1038 static void all_config_cdb_len(void)
1040 struct sdebug_host_info *sdbg_host;
1041 struct Scsi_Host *shost;
1042 struct scsi_device *sdev;
1044 spin_lock(&sdebug_host_list_lock);
1045 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1046 shost = sdbg_host->shost;
1047 shost_for_each_device(sdev, shost) {
1048 config_cdb_len(sdev);
1051 spin_unlock(&sdebug_host_list_lock);
1054 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1056 struct sdebug_host_info *sdhp;
1057 struct sdebug_dev_info *dp;
1059 spin_lock(&sdebug_host_list_lock);
1060 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
1061 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1062 if ((devip->sdbg_host == dp->sdbg_host) &&
1063 (devip->target == dp->target))
1064 clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1067 spin_unlock(&sdebug_host_list_lock);
1070 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1074 k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1075 if (k != SDEBUG_NUM_UAS) {
1076 const char *cp = NULL;
1080 mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1081 POWER_ON_RESET_ASCQ);
1083 cp = "power on reset";
1085 case SDEBUG_UA_BUS_RESET:
1086 mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1091 case SDEBUG_UA_MODE_CHANGED:
1092 mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1095 cp = "mode parameters changed";
1097 case SDEBUG_UA_CAPACITY_CHANGED:
1098 mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1099 CAPACITY_CHANGED_ASCQ);
1101 cp = "capacity data changed";
1103 case SDEBUG_UA_MICROCODE_CHANGED:
1104 mk_sense_buffer(scp, UNIT_ATTENTION,
1106 MICROCODE_CHANGED_ASCQ);
1108 cp = "microcode has been changed";
1110 case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1111 mk_sense_buffer(scp, UNIT_ATTENTION,
1113 MICROCODE_CHANGED_WO_RESET_ASCQ);
1115 cp = "microcode has been changed without reset";
1117 case SDEBUG_UA_LUNS_CHANGED:
1119 * SPC-3 behavior is to report a UNIT ATTENTION with
1120 * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1121 * on the target, until a REPORT LUNS command is
1122 * received. SPC-4 behavior is to report it only once.
1123 * NOTE: sdebug_scsi_level does not use the same
1124 * values as struct scsi_device->scsi_level.
1126 if (sdebug_scsi_level >= 6) /* SPC-4 and above */
1127 clear_luns_changed_on_target(devip);
1128 mk_sense_buffer(scp, UNIT_ATTENTION,
1132 cp = "reported luns data has changed";
1135 pr_warn("unexpected unit attention code=%d\n", k);
1140 clear_bit(k, devip->uas_bm);
1142 sdev_printk(KERN_INFO, scp->device,
1143 "%s reports: Unit attention: %s\n",
1145 return check_condition_result;
1150 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1151 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1155 struct scsi_data_buffer *sdb = &scp->sdb;
1159 if (scp->sc_data_direction != DMA_FROM_DEVICE)
1160 return DID_ERROR << 16;
1162 act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1164 scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1169 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1170 * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1171 * calls, not required to write in ascending offset order. Assumes resid
1172 * set to scsi_bufflen() prior to any calls.
1174 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1175 int arr_len, unsigned int off_dst)
1177 unsigned int act_len, n;
1178 struct scsi_data_buffer *sdb = &scp->sdb;
1179 off_t skip = off_dst;
1181 if (sdb->length <= off_dst)
1183 if (scp->sc_data_direction != DMA_FROM_DEVICE)
1184 return DID_ERROR << 16;
1186 act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1187 arr, arr_len, skip);
1188 pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1189 __func__, off_dst, scsi_bufflen(scp), act_len,
1190 scsi_get_resid(scp));
1191 n = scsi_bufflen(scp) - (off_dst + act_len);
1192 scsi_set_resid(scp, min_t(int, scsi_get_resid(scp), n));
1196 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1197 * 'arr' or -1 if error.
1199 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1202 if (!scsi_bufflen(scp))
1204 if (scp->sc_data_direction != DMA_TO_DEVICE)
1207 return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1211 static char sdebug_inq_vendor_id[9] = "Linux ";
1212 static char sdebug_inq_product_id[17] = "scsi_debug ";
1213 static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
1214 /* Use some locally assigned NAAs for SAS addresses. */
1215 static const u64 naa3_comp_a = 0x3222222000000000ULL;
1216 static const u64 naa3_comp_b = 0x3333333000000000ULL;
1217 static const u64 naa3_comp_c = 0x3111111000000000ULL;
1219 /* Device identification VPD page. Returns number of bytes placed in arr */
1220 static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
1221 int target_dev_id, int dev_id_num,
1222 const char *dev_id_str, int dev_id_str_len,
1223 const uuid_t *lu_name)
1228 port_a = target_dev_id + 1;
1229 /* T10 vendor identifier field format (faked) */
1230 arr[0] = 0x2; /* ASCII */
1233 memcpy(&arr[4], sdebug_inq_vendor_id, 8);
1234 memcpy(&arr[12], sdebug_inq_product_id, 16);
1235 memcpy(&arr[28], dev_id_str, dev_id_str_len);
1236 num = 8 + 16 + dev_id_str_len;
1239 if (dev_id_num >= 0) {
1240 if (sdebug_uuid_ctl) {
1241 /* Locally assigned UUID */
1242 arr[num++] = 0x1; /* binary (not necessarily sas) */
1243 arr[num++] = 0xa; /* PIV=0, lu, naa */
1246 arr[num++] = 0x10; /* uuid type=1, locally assigned */
1248 memcpy(arr + num, lu_name, 16);
1251 /* NAA-3, Logical unit identifier (binary) */
1252 arr[num++] = 0x1; /* binary (not necessarily sas) */
1253 arr[num++] = 0x3; /* PIV=0, lu, naa */
1256 put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
1259 /* Target relative port number */
1260 arr[num++] = 0x61; /* proto=sas, binary */
1261 arr[num++] = 0x94; /* PIV=1, target port, rel port */
1262 arr[num++] = 0x0; /* reserved */
1263 arr[num++] = 0x4; /* length */
1264 arr[num++] = 0x0; /* reserved */
1265 arr[num++] = 0x0; /* reserved */
1267 arr[num++] = 0x1; /* relative port A */
1269 /* NAA-3, Target port identifier */
1270 arr[num++] = 0x61; /* proto=sas, binary */
1271 arr[num++] = 0x93; /* piv=1, target port, naa */
1274 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1276 /* NAA-3, Target port group identifier */
1277 arr[num++] = 0x61; /* proto=sas, binary */
1278 arr[num++] = 0x95; /* piv=1, target port group id */
1283 put_unaligned_be16(port_group_id, arr + num);
1285 /* NAA-3, Target device identifier */
1286 arr[num++] = 0x61; /* proto=sas, binary */
1287 arr[num++] = 0xa3; /* piv=1, target device, naa */
1290 put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
1292 /* SCSI name string: Target device identifier */
1293 arr[num++] = 0x63; /* proto=sas, UTF-8 */
1294 arr[num++] = 0xa8; /* piv=1, target device, SCSI name string */
1297 memcpy(arr + num, "naa.32222220", 12);
1299 snprintf(b, sizeof(b), "%08X", target_dev_id);
1300 memcpy(arr + num, b, 8);
1302 memset(arr + num, 0, 4);
1307 static unsigned char vpd84_data[] = {
1308 /* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
1309 0x22,0x22,0x22,0x0,0xbb,0x1,
1310 0x22,0x22,0x22,0x0,0xbb,0x2,
1313 /* Software interface identification VPD page */
1314 static int inquiry_vpd_84(unsigned char *arr)
1316 memcpy(arr, vpd84_data, sizeof(vpd84_data));
1317 return sizeof(vpd84_data);
1320 /* Management network addresses VPD page */
1321 static int inquiry_vpd_85(unsigned char *arr)
1324 const char *na1 = "https://www.kernel.org/config";
1325 const char *na2 = "http://www.kernel.org/log";
1328 arr[num++] = 0x1; /* lu, storage config */
1329 arr[num++] = 0x0; /* reserved */
1334 plen = ((plen / 4) + 1) * 4;
1335 arr[num++] = plen; /* length, null termianted, padded */
1336 memcpy(arr + num, na1, olen);
1337 memset(arr + num + olen, 0, plen - olen);
1340 arr[num++] = 0x4; /* lu, logging */
1341 arr[num++] = 0x0; /* reserved */
1346 plen = ((plen / 4) + 1) * 4;
1347 arr[num++] = plen; /* length, null terminated, padded */
1348 memcpy(arr + num, na2, olen);
1349 memset(arr + num + olen, 0, plen - olen);
1355 /* SCSI ports VPD page */
1356 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1361 port_a = target_dev_id + 1;
1362 port_b = port_a + 1;
1363 arr[num++] = 0x0; /* reserved */
1364 arr[num++] = 0x0; /* reserved */
1366 arr[num++] = 0x1; /* relative port 1 (primary) */
1367 memset(arr + num, 0, 6);
1370 arr[num++] = 12; /* length tp descriptor */
1371 /* naa-5 target port identifier (A) */
1372 arr[num++] = 0x61; /* proto=sas, binary */
1373 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1374 arr[num++] = 0x0; /* reserved */
1375 arr[num++] = 0x8; /* length */
1376 put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1378 arr[num++] = 0x0; /* reserved */
1379 arr[num++] = 0x0; /* reserved */
1381 arr[num++] = 0x2; /* relative port 2 (secondary) */
1382 memset(arr + num, 0, 6);
1385 arr[num++] = 12; /* length tp descriptor */
1386 /* naa-5 target port identifier (B) */
1387 arr[num++] = 0x61; /* proto=sas, binary */
1388 arr[num++] = 0x93; /* PIV=1, target port, NAA */
1389 arr[num++] = 0x0; /* reserved */
1390 arr[num++] = 0x8; /* length */
1391 put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1398 static unsigned char vpd89_data[] = {
1399 /* from 4th byte */ 0,0,0,0,
1400 'l','i','n','u','x',' ',' ',' ',
1401 'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
1403 0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1405 0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
1406 0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
1407 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
1408 0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
1410 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1412 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
1414 0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
1415 0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
1416 0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
1417 0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
1418 0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
1419 0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
1420 0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
1421 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1422 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1423 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1424 0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
1425 0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
1426 0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
1427 0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
1428 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1429 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1430 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1431 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1432 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1433 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1434 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1435 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1436 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1437 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1438 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1439 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
1442 /* ATA Information VPD page */
1443 static int inquiry_vpd_89(unsigned char *arr)
1445 memcpy(arr, vpd89_data, sizeof(vpd89_data));
1446 return sizeof(vpd89_data);
1450 static unsigned char vpdb0_data[] = {
1451 /* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
1452 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1453 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1454 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1457 /* Block limits VPD page (SBC-3) */
1458 static int inquiry_vpd_b0(unsigned char *arr)
1462 memcpy(arr, vpdb0_data, sizeof(vpdb0_data));
1464 /* Optimal transfer length granularity */
1465 if (sdebug_opt_xferlen_exp != 0 &&
1466 sdebug_physblk_exp < sdebug_opt_xferlen_exp)
1467 gran = 1 << sdebug_opt_xferlen_exp;
1469 gran = 1 << sdebug_physblk_exp;
1470 put_unaligned_be16(gran, arr + 2);
1472 /* Maximum Transfer Length */
1473 if (sdebug_store_sectors > 0x400)
1474 put_unaligned_be32(sdebug_store_sectors, arr + 4);
1476 /* Optimal Transfer Length */
1477 put_unaligned_be32(sdebug_opt_blks, &arr[8]);
1480 /* Maximum Unmap LBA Count */
1481 put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);
1483 /* Maximum Unmap Block Descriptor Count */
1484 put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
1487 /* Unmap Granularity Alignment */
1488 if (sdebug_unmap_alignment) {
1489 put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
1490 arr[28] |= 0x80; /* UGAVALID */
1493 /* Optimal Unmap Granularity */
1494 put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);
1496 /* Maximum WRITE SAME Length */
1497 put_unaligned_be64(sdebug_write_same_length, &arr[32]);
1499 return 0x3c; /* Mandatory page length for Logical Block Provisioning */
1501 return sizeof(vpdb0_data);
1504 /* Block device characteristics VPD page (SBC-3) */
1505 static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
1507 memset(arr, 0, 0x3c);
1509 arr[1] = 1; /* non rotating medium (e.g. solid state) */
1511 arr[3] = 5; /* less than 1.8" */
1512 if (devip->zmodel == BLK_ZONED_HA)
1513 arr[4] = 1 << 4; /* zoned field = 01b */
1518 /* Logical block provisioning VPD page (SBC-4) */
1519 static int inquiry_vpd_b2(unsigned char *arr)
1521 memset(arr, 0, 0x4);
1522 arr[0] = 0; /* threshold exponent */
1529 if (sdebug_lbprz && scsi_debug_lbp())
1530 arr[1] |= (sdebug_lbprz & 0x7) << 2; /* sbc4r07 and later */
1531 /* anc_sup=0; dp=0 (no provisioning group descriptor) */
1532 /* minimum_percentage=0; provisioning_type=0 (unknown) */
1533 /* threshold_percentage=0 */
1537 /* Zoned block device characteristics VPD page (ZBC mandatory) */
1538 static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
1540 memset(arr, 0, 0x3c);
1541 arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
1543 * Set Optimal number of open sequential write preferred zones and
1544 * Optimal number of non-sequentially written sequential write
1545 * preferred zones fields to 'not reported' (0xffffffff). Leave other
1546 * fields set to zero, apart from Max. number of open swrz_s field.
1548 put_unaligned_be32(0xffffffff, &arr[4]);
1549 put_unaligned_be32(0xffffffff, &arr[8]);
1550 if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
1551 put_unaligned_be32(devip->max_open, &arr[12]);
1553 put_unaligned_be32(0xffffffff, &arr[12]);
1557 #define SDEBUG_LONG_INQ_SZ 96
1558 #define SDEBUG_MAX_INQ_ARR_SZ 584
1560 static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1562 unsigned char pq_pdt;
1564 unsigned char *cmd = scp->cmnd;
1565 int alloc_len, n, ret;
1566 bool have_wlun, is_disk, is_zbc, is_disk_zbc;
1568 alloc_len = get_unaligned_be16(cmd + 3);
1569 arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
1571 return DID_REQUEUE << 16;
1572 is_disk = (sdebug_ptype == TYPE_DISK);
1573 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
1574 is_disk_zbc = (is_disk || is_zbc);
1575 have_wlun = scsi_is_wlun(scp->device->lun);
1577 pq_pdt = TYPE_WLUN; /* present, wlun */
1578 else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
1579 pq_pdt = 0x7f; /* not present, PQ=3, PDT=0x1f */
1581 pq_pdt = (sdebug_ptype & 0x1f);
1583 if (0x2 & cmd[1]) { /* CMDDT bit set */
1584 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
1586 return check_condition_result;
1587 } else if (0x1 & cmd[1]) { /* EVPD bit set */
1588 int lu_id_num, port_group_id, target_dev_id, len;
1590 int host_no = devip->sdbg_host->shost->host_no;
1592 port_group_id = (((host_no + 1) & 0x7f) << 8) +
1593 (devip->channel & 0x7f);
1594 if (sdebug_vpd_use_hostno == 0)
1596 lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
1597 (devip->target * 1000) + devip->lun);
1598 target_dev_id = ((host_no + 1) * 2000) +
1599 (devip->target * 1000) - 3;
1600 len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
1601 if (0 == cmd[2]) { /* supported vital product data pages */
1602 arr[1] = cmd[2]; /*sanity */
1604 arr[n++] = 0x0; /* this page */
1605 arr[n++] = 0x80; /* unit serial number */
1606 arr[n++] = 0x83; /* device identification */
1607 arr[n++] = 0x84; /* software interface ident. */
1608 arr[n++] = 0x85; /* management network addresses */
1609 arr[n++] = 0x86; /* extended inquiry */
1610 arr[n++] = 0x87; /* mode page policy */
1611 arr[n++] = 0x88; /* SCSI ports */
1612 if (is_disk_zbc) { /* SBC or ZBC */
1613 arr[n++] = 0x89; /* ATA information */
1614 arr[n++] = 0xb0; /* Block limits */
1615 arr[n++] = 0xb1; /* Block characteristics */
1617 arr[n++] = 0xb2; /* LB Provisioning */
1619 arr[n++] = 0xb6; /* ZB dev. char. */
1621 arr[3] = n - 4; /* number of supported VPD pages */
1622 } else if (0x80 == cmd[2]) { /* unit serial number */
1623 arr[1] = cmd[2]; /*sanity */
1625 memcpy(&arr[4], lu_id_str, len);
1626 } else if (0x83 == cmd[2]) { /* device identification */
1627 arr[1] = cmd[2]; /*sanity */
1628 arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
1629 target_dev_id, lu_id_num,
1632 } else if (0x84 == cmd[2]) { /* Software interface ident. */
1633 arr[1] = cmd[2]; /*sanity */
1634 arr[3] = inquiry_vpd_84(&arr[4]);
1635 } else if (0x85 == cmd[2]) { /* Management network addresses */
1636 arr[1] = cmd[2]; /*sanity */
1637 arr[3] = inquiry_vpd_85(&arr[4]);
1638 } else if (0x86 == cmd[2]) { /* extended inquiry */
1639 arr[1] = cmd[2]; /*sanity */
1640 arr[3] = 0x3c; /* number of following entries */
1641 if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1642 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
1643 else if (have_dif_prot)
1644 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
1646 arr[4] = 0x0; /* no protection stuff */
1647 arr[5] = 0x7; /* head of q, ordered + simple q's */
1648 } else if (0x87 == cmd[2]) { /* mode page policy */
1649 arr[1] = cmd[2]; /*sanity */
1650 arr[3] = 0x8; /* number of following entries */
1651 arr[4] = 0x2; /* disconnect-reconnect mp */
1652 arr[6] = 0x80; /* mlus, shared */
1653 arr[8] = 0x18; /* protocol specific lu */
1654 arr[10] = 0x82; /* mlus, per initiator port */
1655 } else if (0x88 == cmd[2]) { /* SCSI Ports */
1656 arr[1] = cmd[2]; /*sanity */
1657 arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
1658 } else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
1659 arr[1] = cmd[2]; /*sanity */
1660 n = inquiry_vpd_89(&arr[4]);
1661 put_unaligned_be16(n, arr + 2);
1662 } else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
1663 arr[1] = cmd[2]; /*sanity */
1664 arr[3] = inquiry_vpd_b0(&arr[4]);
1665 } else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
1666 arr[1] = cmd[2]; /*sanity */
1667 arr[3] = inquiry_vpd_b1(devip, &arr[4]);
1668 } else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
1669 arr[1] = cmd[2]; /*sanity */
1670 arr[3] = inquiry_vpd_b2(&arr[4]);
1671 } else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
1672 arr[1] = cmd[2]; /*sanity */
1673 arr[3] = inquiry_vpd_b6(devip, &arr[4]);
1675 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
1677 return check_condition_result;
1679 len = min(get_unaligned_be16(arr + 2) + 4, alloc_len);
1680 ret = fill_from_dev_buffer(scp, arr,
1681 min(len, SDEBUG_MAX_INQ_ARR_SZ));
1685 /* drops through here for a standard inquiry */
1686 arr[1] = sdebug_removable ? 0x80 : 0; /* Removable disk */
1687 arr[2] = sdebug_scsi_level;
1688 arr[3] = 2; /* response_data_format==2 */
1689 arr[4] = SDEBUG_LONG_INQ_SZ - 5;
1690 arr[5] = (int)have_dif_prot; /* PROTECT bit */
1691 if (sdebug_vpd_use_hostno == 0)
1692 arr[5] |= 0x10; /* claim: implicit TPGS */
1693 arr[6] = 0x10; /* claim: MultiP */
1694 /* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
1695 arr[7] = 0xa; /* claim: LINKED + CMDQUE */
1696 memcpy(&arr[8], sdebug_inq_vendor_id, 8);
1697 memcpy(&arr[16], sdebug_inq_product_id, 16);
1698 memcpy(&arr[32], sdebug_inq_product_rev, 4);
1699 /* Use Vendor Specific area to place driver date in ASCII hex */
1700 memcpy(&arr[36], sdebug_version_date, 8);
1701 /* version descriptors (2 bytes each) follow */
1702 put_unaligned_be16(0xc0, arr + 58); /* SAM-6 no version claimed */
1703 put_unaligned_be16(0x5c0, arr + 60); /* SPC-5 no version claimed */
1705 if (is_disk) { /* SBC-4 no version claimed */
1706 put_unaligned_be16(0x600, arr + n);
1708 } else if (sdebug_ptype == TYPE_TAPE) { /* SSC-4 rev 3 */
1709 put_unaligned_be16(0x525, arr + n);
1711 } else if (is_zbc) { /* ZBC BSR INCITS 536 revision 05 */
1712 put_unaligned_be16(0x624, arr + n);
1715 put_unaligned_be16(0x2100, arr + n); /* SPL-4 no version claimed */
1716 ret = fill_from_dev_buffer(scp, arr,
1717 min_t(int, alloc_len, SDEBUG_LONG_INQ_SZ));
1722 /* See resp_iec_m_pg() for how this data is manipulated */
1723 static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
1726 static int resp_requests(struct scsi_cmnd *scp,
1727 struct sdebug_dev_info *devip)
1729 unsigned char *cmd = scp->cmnd;
1730 unsigned char arr[SCSI_SENSE_BUFFERSIZE]; /* assume >= 18 bytes */
1731 bool dsense = !!(cmd[1] & 1);
1732 int alloc_len = cmd[4];
1734 int stopped_state = atomic_read(&devip->stopped);
1736 memset(arr, 0, sizeof(arr));
1737 if (stopped_state > 0) { /* some "pollable" data [spc6r02: 5.12.2] */
1741 arr[2] = LOGICAL_UNIT_NOT_READY;
1742 arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
1746 arr[2] = NOT_READY; /* NO_SENSE in sense_key */
1747 arr[7] = 0xa; /* 18 byte sense buffer */
1748 arr[12] = LOGICAL_UNIT_NOT_READY;
1749 arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
1751 } else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
1752 /* Information exceptions control mode page: TEST=1, MRIE=6 */
1755 arr[1] = 0x0; /* NO_SENSE in sense_key */
1756 arr[2] = THRESHOLD_EXCEEDED;
1757 arr[3] = 0xff; /* Failure prediction(false) */
1761 arr[2] = 0x0; /* NO_SENSE in sense_key */
1762 arr[7] = 0xa; /* 18 byte sense buffer */
1763 arr[12] = THRESHOLD_EXCEEDED;
1764 arr[13] = 0xff; /* Failure prediction(false) */
1766 } else { /* nothing to report */
1769 memset(arr, 0, len);
1772 memset(arr, 0, len);
1777 return fill_from_dev_buffer(scp, arr, min_t(int, len, alloc_len));
1780 static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1782 unsigned char *cmd = scp->cmnd;
1783 int power_cond, want_stop, stopped_state;
1786 power_cond = (cmd[4] & 0xf0) >> 4;
1788 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
1789 return check_condition_result;
1791 want_stop = !(cmd[4] & 1);
1792 stopped_state = atomic_read(&devip->stopped);
1793 if (stopped_state == 2) {
1794 ktime_t now_ts = ktime_get_boottime();
1796 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
1797 u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
1799 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
1800 /* tur_ms_to_ready timer extinguished */
1801 atomic_set(&devip->stopped, 0);
1805 if (stopped_state == 2) {
1807 stopped_state = 1; /* dummy up success */
1808 } else { /* Disallow tur_ms_to_ready delay to be overridden */
1809 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
1810 return check_condition_result;
1814 changing = (stopped_state != want_stop);
1816 atomic_xchg(&devip->stopped, want_stop);
1817 if (!changing || (cmd[1] & 0x1)) /* state unchanged or IMMED bit set in cdb */
1818 return SDEG_RES_IMMED_MASK;
1823 static sector_t get_sdebug_capacity(void)
1825 static const unsigned int gibibyte = 1073741824;
1827 if (sdebug_virtual_gb > 0)
1828 return (sector_t)sdebug_virtual_gb *
1829 (gibibyte / sdebug_sector_size);
1831 return sdebug_store_sectors;
1834 #define SDEBUG_READCAP_ARR_SZ 8
1835 static int resp_readcap(struct scsi_cmnd *scp,
1836 struct sdebug_dev_info *devip)
1838 unsigned char arr[SDEBUG_READCAP_ARR_SZ];
1841 /* following just in case virtual_gb changed */
1842 sdebug_capacity = get_sdebug_capacity();
1843 memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
1844 if (sdebug_capacity < 0xffffffff) {
1845 capac = (unsigned int)sdebug_capacity - 1;
1846 put_unaligned_be32(capac, arr + 0);
1848 put_unaligned_be32(0xffffffff, arr + 0);
1849 put_unaligned_be16(sdebug_sector_size, arr + 6);
1850 return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
1853 #define SDEBUG_READCAP16_ARR_SZ 32
1854 static int resp_readcap16(struct scsi_cmnd *scp,
1855 struct sdebug_dev_info *devip)
1857 unsigned char *cmd = scp->cmnd;
1858 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1861 alloc_len = get_unaligned_be32(cmd + 10);
1862 /* following just in case virtual_gb changed */
1863 sdebug_capacity = get_sdebug_capacity();
1864 memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
1865 put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
1866 put_unaligned_be32(sdebug_sector_size, arr + 8);
1867 arr[13] = sdebug_physblk_exp & 0xf;
1868 arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
1870 if (scsi_debug_lbp()) {
1871 arr[14] |= 0x80; /* LBPME */
1872 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
1873 * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
1874 * in the wider field maps to 0 in this field.
1876 if (sdebug_lbprz & 1) /* precisely what the draft requires */
1880 arr[15] = sdebug_lowest_aligned & 0xff;
1882 if (have_dif_prot) {
1883 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
1884 arr[12] |= 1; /* PROT_EN */
1887 return fill_from_dev_buffer(scp, arr,
1888 min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
1891 #define SDEBUG_MAX_TGTPGS_ARR_SZ 1412
1893 static int resp_report_tgtpgs(struct scsi_cmnd *scp,
1894 struct sdebug_dev_info *devip)
1896 unsigned char *cmd = scp->cmnd;
1898 int host_no = devip->sdbg_host->shost->host_no;
1899 int port_group_a, port_group_b, port_a, port_b;
1903 alen = get_unaligned_be32(cmd + 6);
1904 arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
1906 return DID_REQUEUE << 16;
1908 * EVPD page 0x88 states we have two ports, one
1909 * real and a fake port with no device connected.
1910 * So we create two port groups with one port each
1911 * and set the group with port B to unavailable.
1913 port_a = 0x1; /* relative port A */
1914 port_b = 0x2; /* relative port B */
1915 port_group_a = (((host_no + 1) & 0x7f) << 8) +
1916 (devip->channel & 0x7f);
1917 port_group_b = (((host_no + 1) & 0x7f) << 8) +
1918 (devip->channel & 0x7f) + 0x80;
1921 * The asymmetric access state is cycled according to the host_id.
1924 if (sdebug_vpd_use_hostno == 0) {
1925 arr[n++] = host_no % 3; /* Asymm access state */
1926 arr[n++] = 0x0F; /* claim: all states are supported */
1928 arr[n++] = 0x0; /* Active/Optimized path */
1929 arr[n++] = 0x01; /* only support active/optimized paths */
1931 put_unaligned_be16(port_group_a, arr + n);
1933 arr[n++] = 0; /* Reserved */
1934 arr[n++] = 0; /* Status code */
1935 arr[n++] = 0; /* Vendor unique */
1936 arr[n++] = 0x1; /* One port per group */
1937 arr[n++] = 0; /* Reserved */
1938 arr[n++] = 0; /* Reserved */
1939 put_unaligned_be16(port_a, arr + n);
1941 arr[n++] = 3; /* Port unavailable */
1942 arr[n++] = 0x08; /* claim: only unavailalbe paths are supported */
1943 put_unaligned_be16(port_group_b, arr + n);
1945 arr[n++] = 0; /* Reserved */
1946 arr[n++] = 0; /* Status code */
1947 arr[n++] = 0; /* Vendor unique */
1948 arr[n++] = 0x1; /* One port per group */
1949 arr[n++] = 0; /* Reserved */
1950 arr[n++] = 0; /* Reserved */
1951 put_unaligned_be16(port_b, arr + n);
1955 put_unaligned_be32(rlen, arr + 0);
1958 * Return the smallest value of either
1959 * - The allocated length
1960 * - The constructed command length
1961 * - The maximum array size
1963 rlen = min(alen, n);
1964 ret = fill_from_dev_buffer(scp, arr,
1965 min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
1970 static int resp_rsup_opcodes(struct scsi_cmnd *scp,
1971 struct sdebug_dev_info *devip)
1974 u8 reporting_opts, req_opcode, sdeb_i, supp;
1976 u32 alloc_len, a_len;
1977 int k, offset, len, errsts, count, bump, na;
1978 const struct opcode_info_t *oip;
1979 const struct opcode_info_t *r_oip;
1981 u8 *cmd = scp->cmnd;
1983 rctd = !!(cmd[2] & 0x80);
1984 reporting_opts = cmd[2] & 0x7;
1985 req_opcode = cmd[3];
1986 req_sa = get_unaligned_be16(cmd + 4);
1987 alloc_len = get_unaligned_be32(cmd + 6);
1988 if (alloc_len < 4 || alloc_len > 0xffff) {
1989 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
1990 return check_condition_result;
1992 if (alloc_len > 8192)
1996 arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
1998 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
2000 return check_condition_result;
2002 switch (reporting_opts) {
2003 case 0: /* all commands */
2004 /* count number of commands */
2005 for (count = 0, oip = opcode_info_arr;
2006 oip->num_attached != 0xff; ++oip) {
2007 if (F_INV_OP & oip->flags)
2009 count += (oip->num_attached + 1);
2011 bump = rctd ? 20 : 8;
2012 put_unaligned_be32(count * bump, arr);
2013 for (offset = 4, oip = opcode_info_arr;
2014 oip->num_attached != 0xff && offset < a_len; ++oip) {
2015 if (F_INV_OP & oip->flags)
2017 na = oip->num_attached;
2018 arr[offset] = oip->opcode;
2019 put_unaligned_be16(oip->sa, arr + offset + 2);
2021 arr[offset + 5] |= 0x2;
2022 if (FF_SA & oip->flags)
2023 arr[offset + 5] |= 0x1;
2024 put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
2026 put_unaligned_be16(0xa, arr + offset + 8);
2028 for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
2029 if (F_INV_OP & oip->flags)
2032 arr[offset] = oip->opcode;
2033 put_unaligned_be16(oip->sa, arr + offset + 2);
2035 arr[offset + 5] |= 0x2;
2036 if (FF_SA & oip->flags)
2037 arr[offset + 5] |= 0x1;
2038 put_unaligned_be16(oip->len_mask[0],
2041 put_unaligned_be16(0xa,
2048 case 1: /* one command: opcode only */
2049 case 2: /* one command: opcode plus service action */
2050 case 3: /* one command: if sa==0 then opcode only else opcode+sa */
2051 sdeb_i = opcode_ind_arr[req_opcode];
2052 oip = &opcode_info_arr[sdeb_i];
2053 if (F_INV_OP & oip->flags) {
2057 if (1 == reporting_opts) {
2058 if (FF_SA & oip->flags) {
2059 mk_sense_invalid_fld(scp, SDEB_IN_CDB,
2062 return check_condition_result;
2065 } else if (2 == reporting_opts &&
2066 0 == (FF_SA & oip->flags)) {
2067 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
2068 kfree(arr); /* point at requested sa */
2069 return check_condition_result;
2071 if (0 == (FF_SA & oip->flags) &&
2072 req_opcode == oip->opcode)
2074 else if (0 == (FF_SA & oip->flags)) {
2075 na = oip->num_attached;
2076 for (k = 0, oip = oip->arrp; k < na;
2078 if (req_opcode == oip->opcode)
2081 supp = (k >= na) ? 1 : 3;
2082 } else if (req_sa != oip->sa) {
2083 na = oip->num_attached;
2084 for (k = 0, oip = oip->arrp; k < na;
2086 if (req_sa == oip->sa)
2089 supp = (k >= na) ? 1 : 3;
2093 u = oip->len_mask[0];
2094 put_unaligned_be16(u, arr + 2);
2095 arr[4] = oip->opcode;
2096 for (k = 1; k < u; ++k)
2097 arr[4 + k] = (k < 16) ?
2098 oip->len_mask[k] : 0xff;
2103 arr[1] = (rctd ? 0x80 : 0) | supp;
2105 put_unaligned_be16(0xa, arr + offset);
2110 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
2112 return check_condition_result;
2114 offset = (offset < a_len) ? offset : a_len;
2115 len = (offset < alloc_len) ? offset : alloc_len;
2116 errsts = fill_from_dev_buffer(scp, arr, len);
2121 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2122 struct sdebug_dev_info *devip)
2127 u8 *cmd = scp->cmnd;
2129 memset(arr, 0, sizeof(arr));
2130 repd = !!(cmd[2] & 0x80);
2131 alloc_len = get_unaligned_be32(cmd + 6);
2132 if (alloc_len < 4) {
2133 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2134 return check_condition_result;
2136 arr[0] = 0xc8; /* ATS | ATSS | LURS */
2137 arr[1] = 0x1; /* ITNRS */
2144 len = (len < alloc_len) ? len : alloc_len;
2145 return fill_from_dev_buffer(scp, arr, len);
2148 /* <<Following mode page info copied from ST318451LW>> */
2150 static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
2151 { /* Read-Write Error Recovery page for mode_sense */
2152 unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240, 0, 0, 0,
2155 memcpy(p, err_recov_pg, sizeof(err_recov_pg));
2157 memset(p + 2, 0, sizeof(err_recov_pg) - 2);
2158 return sizeof(err_recov_pg);
2161 static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
2162 { /* Disconnect-Reconnect page for mode_sense */
2163 unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128, 0, 10, 0, 0,
2164 0, 0, 0, 0, 0, 0, 0, 0};
2166 memcpy(p, disconnect_pg, sizeof(disconnect_pg));
2168 memset(p + 2, 0, sizeof(disconnect_pg) - 2);
2169 return sizeof(disconnect_pg);
2172 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2173 { /* Format device page for mode_sense */
2174 unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2175 0, 0, 0, 0, 0, 0, 0, 0,
2176 0, 0, 0, 0, 0x40, 0, 0, 0};
2178 memcpy(p, format_pg, sizeof(format_pg));
2179 put_unaligned_be16(sdebug_sectors_per, p + 10);
2180 put_unaligned_be16(sdebug_sector_size, p + 12);
2181 if (sdebug_removable)
2182 p[20] |= 0x20; /* should agree with INQUIRY */
2184 memset(p + 2, 0, sizeof(format_pg) - 2);
2185 return sizeof(format_pg);
2188 static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2189 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
2192 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2193 { /* Caching page for mode_sense */
2194 unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2195 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2196 unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2197 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0, 0, 0, 0, 0};
2199 if (SDEBUG_OPT_N_WCE & sdebug_opts)
2200 caching_pg[2] &= ~0x4; /* set WCE=0 (default WCE=1) */
2201 memcpy(p, caching_pg, sizeof(caching_pg));
2203 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2204 else if (2 == pcontrol)
2205 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2206 return sizeof(caching_pg);
2209 static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2212 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2213 { /* Control mode page for mode_sense */
2214 unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2216 unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2220 ctrl_m_pg[2] |= 0x4;
2222 ctrl_m_pg[2] &= ~0x4;
2225 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2227 memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2229 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2230 else if (2 == pcontrol)
2231 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2232 return sizeof(ctrl_m_pg);
2236 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2237 { /* Informational Exceptions control mode page for mode_sense */
2238 unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2240 unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2243 memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2245 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2246 else if (2 == pcontrol)
2247 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2248 return sizeof(iec_m_pg);
2251 static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
2252 { /* SAS SSP mode page - short format for mode_sense */
2253 unsigned char sas_sf_m_pg[] = {0x19, 0x6,
2254 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};
2256 memcpy(p, sas_sf_m_pg, sizeof(sas_sf_m_pg));
2258 memset(p + 2, 0, sizeof(sas_sf_m_pg) - 2);
2259 return sizeof(sas_sf_m_pg);
2263 static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
2265 { /* SAS phy control and discover mode page for mode_sense */
2266 unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
2267 0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
2268 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2269 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2270 0x2, 0, 0, 0, 0, 0, 0, 0,
2271 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2272 0, 0, 0, 0, 0, 0, 0, 0,
2273 0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
2274 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2275 0, 0, 0, 0, 0, 0, 0, 0, /* insert SAS addr */
2276 0x3, 0, 0, 0, 0, 0, 0, 0,
2277 0x88, 0x99, 0, 0, 0, 0, 0, 0,
2278 0, 0, 0, 0, 0, 0, 0, 0,
2282 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
2283 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
2284 put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
2285 put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
2286 port_a = target_dev_id + 1;
2287 port_b = port_a + 1;
2288 memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
2289 put_unaligned_be32(port_a, p + 20);
2290 put_unaligned_be32(port_b, p + 48 + 20);
2292 memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
2293 return sizeof(sas_pcd_m_pg);
2296 static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
2297 { /* SAS SSP shared protocol specific port mode subpage */
2298 unsigned char sas_sha_m_pg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
2299 0, 0, 0, 0, 0, 0, 0, 0,
2302 memcpy(p, sas_sha_m_pg, sizeof(sas_sha_m_pg));
2304 memset(p + 4, 0, sizeof(sas_sha_m_pg) - 4);
2305 return sizeof(sas_sha_m_pg);
2308 #define SDEBUG_MAX_MSENSE_SZ 256
2310 static int resp_mode_sense(struct scsi_cmnd *scp,
2311 struct sdebug_dev_info *devip)
2313 int pcontrol, pcode, subpcode, bd_len;
2314 unsigned char dev_spec;
2315 int alloc_len, offset, len, target_dev_id;
2316 int target = scp->device->id;
2318 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
2319 unsigned char *cmd = scp->cmnd;
2320 bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
2322 dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
2323 pcontrol = (cmd[2] & 0xc0) >> 6;
2324 pcode = cmd[2] & 0x3f;
2326 msense_6 = (MODE_SENSE == cmd[0]);
2327 llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2328 is_disk = (sdebug_ptype == TYPE_DISK);
2329 is_zbc = (devip->zmodel != BLK_ZONED_NONE);
2330 if ((is_disk || is_zbc) && !dbd)
2331 bd_len = llbaa ? 16 : 8;
2334 alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2335 memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2336 if (0x3 == pcontrol) { /* Saving values not supported */
2337 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2338 return check_condition_result;
2340 target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2341 (devip->target * 1000) - 3;
2342 /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2343 if (is_disk || is_zbc) {
2344 dev_spec = 0x10; /* =0x90 if WP=1 implies read-only */
2356 arr[4] = 0x1; /* set LONGLBA bit */
2357 arr[7] = bd_len; /* assume 255 or less */
2361 if ((bd_len > 0) && (!sdebug_capacity))
2362 sdebug_capacity = get_sdebug_capacity();
2365 if (sdebug_capacity > 0xfffffffe)
2366 put_unaligned_be32(0xffffffff, ap + 0);
2368 put_unaligned_be32(sdebug_capacity, ap + 0);
2369 put_unaligned_be16(sdebug_sector_size, ap + 6);
2372 } else if (16 == bd_len) {
2373 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2374 put_unaligned_be32(sdebug_sector_size, ap + 12);
2379 if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
2380 /* TODO: Control Extension page */
2381 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2382 return check_condition_result;
2387 case 0x1: /* Read-Write error recovery page, direct access */
2388 len = resp_err_recov_pg(ap, pcontrol, target);
2391 case 0x2: /* Disconnect-Reconnect page, all devices */
2392 len = resp_disconnect_pg(ap, pcontrol, target);
2395 case 0x3: /* Format device page, direct access */
2397 len = resp_format_pg(ap, pcontrol, target);
2402 case 0x8: /* Caching page, direct access */
2403 if (is_disk || is_zbc) {
2404 len = resp_caching_pg(ap, pcontrol, target);
2409 case 0xa: /* Control Mode page, all devices */
2410 len = resp_ctrl_m_pg(ap, pcontrol, target);
2413 case 0x19: /* if spc==1 then sas phy, control+discover */
2414 if ((subpcode > 0x2) && (subpcode < 0xff)) {
2415 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2416 return check_condition_result;
2419 if ((0x0 == subpcode) || (0xff == subpcode))
2420 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2421 if ((0x1 == subpcode) || (0xff == subpcode))
2422 len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2424 if ((0x2 == subpcode) || (0xff == subpcode))
2425 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2428 case 0x1c: /* Informational Exceptions Mode page, all devices */
2429 len = resp_iec_m_pg(ap, pcontrol, target);
2432 case 0x3f: /* Read all Mode pages */
2433 if ((0 == subpcode) || (0xff == subpcode)) {
2434 len = resp_err_recov_pg(ap, pcontrol, target);
2435 len += resp_disconnect_pg(ap + len, pcontrol, target);
2437 len += resp_format_pg(ap + len, pcontrol,
2439 len += resp_caching_pg(ap + len, pcontrol,
2441 } else if (is_zbc) {
2442 len += resp_caching_pg(ap + len, pcontrol,
2445 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2446 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2447 if (0xff == subpcode) {
2448 len += resp_sas_pcd_m_spg(ap + len, pcontrol,
2449 target, target_dev_id);
2450 len += resp_sas_sha_m_spg(ap + len, pcontrol);
2452 len += resp_iec_m_pg(ap + len, pcontrol, target);
2455 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2456 return check_condition_result;
2464 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2465 return check_condition_result;
2468 arr[0] = offset - 1;
2470 put_unaligned_be16((offset - 2), arr + 0);
2471 return fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, offset));
2474 #define SDEBUG_MAX_MSELECT_SZ 512
2476 static int resp_mode_select(struct scsi_cmnd *scp,
2477 struct sdebug_dev_info *devip)
2479 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
2480 int param_len, res, mpage;
2481 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
2482 unsigned char *cmd = scp->cmnd;
2483 int mselect6 = (MODE_SELECT == cmd[0]);
2485 memset(arr, 0, sizeof(arr));
2488 param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2489 if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
2490 mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
2491 return check_condition_result;
2493 res = fetch_to_dev_buffer(scp, arr, param_len);
2495 return DID_ERROR << 16;
2496 else if (sdebug_verbose && (res < param_len))
2497 sdev_printk(KERN_INFO, scp->device,
2498 "%s: cdb indicated=%d, IO sent=%d bytes\n",
2499 __func__, param_len, res);
2500 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
2501 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
2503 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
2504 return check_condition_result;
2506 off = bd_len + (mselect6 ? 4 : 8);
2507 mpage = arr[off] & 0x3f;
2508 ps = !!(arr[off] & 0x80);
2510 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
2511 return check_condition_result;
2513 spf = !!(arr[off] & 0x40);
2514 pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
2516 if ((pg_len + off) > param_len) {
2517 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2518 PARAMETER_LIST_LENGTH_ERR, 0);
2519 return check_condition_result;
2522 case 0x8: /* Caching Mode page */
2523 if (caching_pg[1] == arr[off + 1]) {
2524 memcpy(caching_pg + 2, arr + off + 2,
2525 sizeof(caching_pg) - 2);
2526 goto set_mode_changed_ua;
2529 case 0xa: /* Control Mode page */
2530 if (ctrl_m_pg[1] == arr[off + 1]) {
2531 memcpy(ctrl_m_pg + 2, arr + off + 2,
2532 sizeof(ctrl_m_pg) - 2);
2533 if (ctrl_m_pg[4] & 0x8)
2537 sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
2538 goto set_mode_changed_ua;
2541 case 0x1c: /* Informational Exceptions Mode page */
2542 if (iec_m_pg[1] == arr[off + 1]) {
2543 memcpy(iec_m_pg + 2, arr + off + 2,
2544 sizeof(iec_m_pg) - 2);
2545 goto set_mode_changed_ua;
2551 mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
2552 return check_condition_result;
2553 set_mode_changed_ua:
2554 set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
2558 static int resp_temp_l_pg(unsigned char *arr)
2560 unsigned char temp_l_pg[] = {0x0, 0x0, 0x3, 0x2, 0x0, 38,
2561 0x0, 0x1, 0x3, 0x2, 0x0, 65,
2564 memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
2565 return sizeof(temp_l_pg);
2568 static int resp_ie_l_pg(unsigned char *arr)
2570 unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2573 memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2574 if (iec_m_pg[2] & 0x4) { /* TEST bit set */
2575 arr[4] = THRESHOLD_EXCEEDED;
2578 return sizeof(ie_l_pg);
2581 #define SDEBUG_MAX_LSENSE_SZ 512
2583 static int resp_log_sense(struct scsi_cmnd *scp,
2584 struct sdebug_dev_info *devip)
2586 int ppc, sp, pcode, subpcode, alloc_len, len, n;
2587 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
2588 unsigned char *cmd = scp->cmnd;
2590 memset(arr, 0, sizeof(arr));
2594 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
2595 return check_condition_result;
2597 pcode = cmd[2] & 0x3f;
2598 subpcode = cmd[3] & 0xff;
2599 alloc_len = get_unaligned_be16(cmd + 7);
2601 if (0 == subpcode) {
2603 case 0x0: /* Supported log pages log page */
2605 arr[n++] = 0x0; /* this page */
2606 arr[n++] = 0xd; /* Temperature */
2607 arr[n++] = 0x2f; /* Informational exceptions */
2610 case 0xd: /* Temperature log page */
2611 arr[3] = resp_temp_l_pg(arr + 4);
2613 case 0x2f: /* Informational exceptions log page */
2614 arr[3] = resp_ie_l_pg(arr + 4);
2617 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2618 return check_condition_result;
2620 } else if (0xff == subpcode) {
2624 case 0x0: /* Supported log pages and subpages log page */
2627 arr[n++] = 0x0; /* 0,0 page */
2629 arr[n++] = 0xff; /* this page */
2631 arr[n++] = 0x0; /* Temperature */
2633 arr[n++] = 0x0; /* Informational exceptions */
2636 case 0xd: /* Temperature subpages */
2639 arr[n++] = 0x0; /* Temperature */
2642 case 0x2f: /* Informational exceptions subpages */
2645 arr[n++] = 0x0; /* Informational exceptions */
2649 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2650 return check_condition_result;
2653 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2654 return check_condition_result;
2656 len = min_t(int, get_unaligned_be16(arr + 2) + 4, alloc_len);
2657 return fill_from_dev_buffer(scp, arr,
2658 min_t(int, len, SDEBUG_MAX_INQ_ARR_SZ));
2661 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
2663 return devip->nr_zones != 0;
2666 static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
2667 unsigned long long lba)
2669 return &devip->zstate[lba >> devip->zsize_shift];
2672 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
2674 return zsp->z_type == ZBC_ZONE_TYPE_CNV;
2677 static void zbc_close_zone(struct sdebug_dev_info *devip,
2678 struct sdeb_zone_state *zsp)
2680 enum sdebug_z_cond zc;
2682 if (zbc_zone_is_conv(zsp))
2686 if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
2689 if (zc == ZC2_IMPLICIT_OPEN)
2690 devip->nr_imp_open--;
2692 devip->nr_exp_open--;
2694 if (zsp->z_wp == zsp->z_start) {
2695 zsp->z_cond = ZC1_EMPTY;
2697 zsp->z_cond = ZC4_CLOSED;
2702 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
2704 struct sdeb_zone_state *zsp = &devip->zstate[0];
2707 for (i = 0; i < devip->nr_zones; i++, zsp++) {
2708 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
2709 zbc_close_zone(devip, zsp);
2715 static void zbc_open_zone(struct sdebug_dev_info *devip,
2716 struct sdeb_zone_state *zsp, bool explicit)
2718 enum sdebug_z_cond zc;
2720 if (zbc_zone_is_conv(zsp))
2724 if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
2725 (!explicit && zc == ZC2_IMPLICIT_OPEN))
2728 /* Close an implicit open zone if necessary */
2729 if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
2730 zbc_close_zone(devip, zsp);
2731 else if (devip->max_open &&
2732 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
2733 zbc_close_imp_open_zone(devip);
2735 if (zsp->z_cond == ZC4_CLOSED)
2738 zsp->z_cond = ZC3_EXPLICIT_OPEN;
2739 devip->nr_exp_open++;
2741 zsp->z_cond = ZC2_IMPLICIT_OPEN;
2742 devip->nr_imp_open++;
2746 static void zbc_inc_wp(struct sdebug_dev_info *devip,
2747 unsigned long long lba, unsigned int num)
2749 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2750 unsigned long long n, end, zend = zsp->z_start + zsp->z_size;
2752 if (zbc_zone_is_conv(zsp))
2755 if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2757 if (zsp->z_wp >= zend)
2758 zsp->z_cond = ZC5_FULL;
2763 if (lba != zsp->z_wp)
2764 zsp->z_non_seq_resource = true;
2770 } else if (end > zsp->z_wp) {
2776 if (zsp->z_wp >= zend)
2777 zsp->z_cond = ZC5_FULL;
2783 zend = zsp->z_start + zsp->z_size;
2788 static int check_zbc_access_params(struct scsi_cmnd *scp,
2789 unsigned long long lba, unsigned int num, bool write)
2791 struct scsi_device *sdp = scp->device;
2792 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2793 struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
2794 struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);
2797 if (devip->zmodel == BLK_ZONED_HA)
2799 /* For host-managed, reads cannot cross zone types boundaries */
2800 if (zsp_end != zsp &&
2801 zbc_zone_is_conv(zsp) &&
2802 !zbc_zone_is_conv(zsp_end)) {
2803 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2806 return check_condition_result;
2811 /* No restrictions for writes within conventional zones */
2812 if (zbc_zone_is_conv(zsp)) {
2813 if (!zbc_zone_is_conv(zsp_end)) {
2814 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2816 WRITE_BOUNDARY_ASCQ);
2817 return check_condition_result;
2822 if (zsp->z_type == ZBC_ZONE_TYPE_SWR) {
2823 /* Writes cannot cross sequential zone boundaries */
2824 if (zsp_end != zsp) {
2825 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2827 WRITE_BOUNDARY_ASCQ);
2828 return check_condition_result;
2830 /* Cannot write full zones */
2831 if (zsp->z_cond == ZC5_FULL) {
2832 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2833 INVALID_FIELD_IN_CDB, 0);
2834 return check_condition_result;
2836 /* Writes must be aligned to the zone WP */
2837 if (lba != zsp->z_wp) {
2838 mk_sense_buffer(scp, ILLEGAL_REQUEST,
2840 UNALIGNED_WRITE_ASCQ);
2841 return check_condition_result;
2845 /* Handle implicit open of closed and empty zones */
2846 if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
2847 if (devip->max_open &&
2848 devip->nr_exp_open >= devip->max_open) {
2849 mk_sense_buffer(scp, DATA_PROTECT,
2852 return check_condition_result;
2854 zbc_open_zone(devip, zsp, false);
2860 static inline int check_device_access_params
2861 (struct scsi_cmnd *scp, unsigned long long lba,
2862 unsigned int num, bool write)
2864 struct scsi_device *sdp = scp->device;
2865 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
2867 if (lba + num > sdebug_capacity) {
2868 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
2869 return check_condition_result;
2871 /* transfer length excessive (tie in to block limits VPD page) */
2872 if (num > sdebug_store_sectors) {
2873 /* needs work to find which cdb byte 'num' comes from */
2874 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
2875 return check_condition_result;
2877 if (write && unlikely(sdebug_wp)) {
2878 mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
2879 return check_condition_result;
2881 if (sdebug_dev_is_zoned(devip))
2882 return check_zbc_access_params(scp, lba, num, write);
2888 * Note: if BUG_ON() fires it usually indicates a problem with the parser
2889 * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
2890 * that access any of the "stores" in struct sdeb_store_info should call this
2891 * function with bug_if_fake_rw set to true.
2893 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
2894 bool bug_if_fake_rw)
2896 if (sdebug_fake_rw) {
2897 BUG_ON(bug_if_fake_rw); /* See note above */
2900 return xa_load(per_store_ap, devip->sdbg_host->si_idx);
2903 /* Returns number of bytes copied or -1 if error. */
2904 static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
2905 u32 sg_skip, u64 lba, u32 num, bool do_write)
2908 u64 block, rest = 0;
2909 enum dma_data_direction dir;
2910 struct scsi_data_buffer *sdb = &scp->sdb;
2914 dir = DMA_TO_DEVICE;
2915 write_since_sync = true;
2917 dir = DMA_FROM_DEVICE;
2920 if (!sdb->length || !sip)
2922 if (scp->sc_data_direction != dir)
2926 block = do_div(lba, sdebug_store_sectors);
2927 if (block + num > sdebug_store_sectors)
2928 rest = block + num - sdebug_store_sectors;
2930 ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2931 fsp + (block * sdebug_sector_size),
2932 (num - rest) * sdebug_sector_size, sg_skip, do_write);
2933 if (ret != (num - rest) * sdebug_sector_size)
2937 ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
2938 fsp, rest * sdebug_sector_size,
2939 sg_skip + ((num - rest) * sdebug_sector_size),
2946 /* Returns number of bytes copied or -1 if error. */
2947 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
2949 struct scsi_data_buffer *sdb = &scp->sdb;
2953 if (scp->sc_data_direction != DMA_TO_DEVICE)
2955 return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
2956 num * sdebug_sector_size, 0, true);
2959 /* If sip->storep+lba compares equal to arr(num), then copy top half of
2960 * arr into sip->storep+lba and return true. If comparison fails then
2962 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
2963 const u8 *arr, bool compare_only)
2966 u64 block, rest = 0;
2967 u32 store_blks = sdebug_store_sectors;
2968 u32 lb_size = sdebug_sector_size;
2969 u8 *fsp = sip->storep;
2971 block = do_div(lba, store_blks);
2972 if (block + num > store_blks)
2973 rest = block + num - store_blks;
2975 res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2979 res = memcmp(fsp, arr + ((num - rest) * lb_size),
2985 arr += num * lb_size;
2986 memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
2988 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
2992 static __be16 dif_compute_csum(const void *buf, int len)
2997 csum = (__force __be16)ip_compute_csum(buf, len);
2999 csum = cpu_to_be16(crc_t10dif(buf, len));
3004 static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
3005 sector_t sector, u32 ei_lba)
3007 __be16 csum = dif_compute_csum(data, sdebug_sector_size);
3009 if (sdt->guard_tag != csum) {
3010 pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
3011 (unsigned long)sector,
3012 be16_to_cpu(sdt->guard_tag),
3016 if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
3017 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
3018 pr_err("REF check failed on sector %lu\n",
3019 (unsigned long)sector);
3022 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3023 be32_to_cpu(sdt->ref_tag) != ei_lba) {
3024 pr_err("REF check failed on sector %lu\n",
3025 (unsigned long)sector);
3031 static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
3032 unsigned int sectors, bool read)
3036 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3037 scp->device->hostdata, true);
3038 struct t10_pi_tuple *dif_storep = sip->dif_storep;
3039 const void *dif_store_end = dif_storep + sdebug_store_sectors;
3040 struct sg_mapping_iter miter;
3042 /* Bytes of protection data to copy into sgl */
3043 resid = sectors * sizeof(*dif_storep);
3045 sg_miter_start(&miter, scsi_prot_sglist(scp),
3046 scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
3047 (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));
3049 while (sg_miter_next(&miter) && resid > 0) {
3050 size_t len = min_t(size_t, miter.length, resid);
3051 void *start = dif_store(sip, sector);
3054 if (dif_store_end < start + len)
3055 rest = start + len - dif_store_end;
3060 memcpy(paddr, start, len - rest);
3062 memcpy(start, paddr, len - rest);
3066 memcpy(paddr + len - rest, dif_storep, rest);
3068 memcpy(dif_storep, paddr + len - rest, rest);
3071 sector += len / sizeof(*dif_storep);
3074 sg_miter_stop(&miter);
3077 static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
3078 unsigned int sectors, u32 ei_lba)
3083 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3084 scp->device->hostdata, true);
3085 struct t10_pi_tuple *sdt;
3087 for (i = 0; i < sectors; i++, ei_lba++) {
3088 sector = start_sec + i;
3089 sdt = dif_store(sip, sector);
3091 if (sdt->app_tag == cpu_to_be16(0xffff))
3095 * Because scsi_debug acts as both initiator and
3096 * target we proceed to verify the PI even if
3097 * RDPROTECT=3. This is done so the "initiator" knows
3098 * which type of error to return. Otherwise we would
3099 * have to iterate over the PI twice.
3101 if (scp->cmnd[1] >> 5) { /* RDPROTECT */
3102 ret = dif_verify(sdt, lba2fake_store(sip, sector),
3111 dif_copy_prot(scp, start_sec, sectors, true);
3117 static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3124 struct sdeb_store_info *sip = devip2sip(devip, true);
3125 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
3126 u8 *cmd = scp->cmnd;
3131 lba = get_unaligned_be64(cmd + 2);
3132 num = get_unaligned_be32(cmd + 10);
3137 lba = get_unaligned_be32(cmd + 2);
3138 num = get_unaligned_be16(cmd + 7);
3143 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3144 (u32)(cmd[1] & 0x1f) << 16;
3145 num = (0 == cmd[4]) ? 256 : cmd[4];
3150 lba = get_unaligned_be32(cmd + 2);
3151 num = get_unaligned_be32(cmd + 6);
3154 case XDWRITEREAD_10:
3156 lba = get_unaligned_be32(cmd + 2);
3157 num = get_unaligned_be16(cmd + 7);
3160 default: /* assume READ(32) */
3161 lba = get_unaligned_be64(cmd + 12);
3162 ei_lba = get_unaligned_be32(cmd + 20);
3163 num = get_unaligned_be32(cmd + 28);
3167 if (unlikely(have_dif_prot && check_prot)) {
3168 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3170 mk_sense_invalid_opcode(scp);
3171 return check_condition_result;
3173 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3174 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3175 (cmd[1] & 0xe0) == 0)
3176 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
3179 if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
3180 atomic_read(&sdeb_inject_pending))) {
3182 atomic_set(&sdeb_inject_pending, 0);
3185 ret = check_device_access_params(scp, lba, num, false);
3188 if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
3189 (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
3190 ((lba + num) > sdebug_medium_error_start))) {
3191 /* claim unrecoverable read error */
3192 mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
3193 /* set info field and valid bit for fixed descriptor */
3194 if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
3195 scp->sense_buffer[0] |= 0x80; /* Valid bit */
3196 ret = (lba < OPT_MEDIUM_ERR_ADDR)
3197 ? OPT_MEDIUM_ERR_ADDR : (int)lba;
3198 put_unaligned_be32(ret, scp->sense_buffer + 3);
3200 scsi_set_resid(scp, scsi_bufflen(scp));
3201 return check_condition_result;
3204 read_lock(macc_lckp);
3207 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3208 switch (prot_verify_read(scp, lba, num, ei_lba)) {
3209 case 1: /* Guard tag error */
3210 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3211 read_unlock(macc_lckp);
3212 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3213 return check_condition_result;
3214 } else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3215 read_unlock(macc_lckp);
3216 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3217 return illegal_condition_result;
3220 case 3: /* Reference tag error */
3221 if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
3222 read_unlock(macc_lckp);
3223 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3224 return check_condition_result;
3225 } else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3226 read_unlock(macc_lckp);
3227 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3228 return illegal_condition_result;
3234 ret = do_device_access(sip, scp, 0, lba, num, false);
3235 read_unlock(macc_lckp);
3236 if (unlikely(ret == -1))
3237 return DID_ERROR << 16;
3239 scsi_set_resid(scp, scsi_bufflen(scp) - ret);
3241 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3242 atomic_read(&sdeb_inject_pending))) {
3243 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3244 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3245 atomic_set(&sdeb_inject_pending, 0);
3246 return check_condition_result;
3247 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3248 /* Logical block guard check failed */
3249 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3250 atomic_set(&sdeb_inject_pending, 0);
3251 return illegal_condition_result;
3252 } else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
3253 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3254 atomic_set(&sdeb_inject_pending, 0);
3255 return illegal_condition_result;
3261 static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
3262 unsigned int sectors, u32 ei_lba)
3265 struct t10_pi_tuple *sdt;
3267 sector_t sector = start_sec;
3270 struct sg_mapping_iter diter;
3271 struct sg_mapping_iter piter;
3273 BUG_ON(scsi_sg_count(SCpnt) == 0);
3274 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
3276 sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
3277 scsi_prot_sg_count(SCpnt),
3278 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3279 sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
3280 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
3282 /* For each protection page */
3283 while (sg_miter_next(&piter)) {
3285 if (WARN_ON(!sg_miter_next(&diter))) {
3290 for (ppage_offset = 0; ppage_offset < piter.length;
3291 ppage_offset += sizeof(struct t10_pi_tuple)) {
3292 /* If we're at the end of the current
3293 * data page advance to the next one
3295 if (dpage_offset >= diter.length) {
3296 if (WARN_ON(!sg_miter_next(&diter))) {
3303 sdt = piter.addr + ppage_offset;
3304 daddr = diter.addr + dpage_offset;
3306 if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
3307 ret = dif_verify(sdt, daddr, sector, ei_lba);
3314 dpage_offset += sdebug_sector_size;
3316 diter.consumed = dpage_offset;
3317 sg_miter_stop(&diter);
3319 sg_miter_stop(&piter);
3321 dif_copy_prot(SCpnt, start_sec, sectors, false);
3328 sg_miter_stop(&diter);
3329 sg_miter_stop(&piter);
3333 static unsigned long lba_to_map_index(sector_t lba)
3335 if (sdebug_unmap_alignment)
3336 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3337 sector_div(lba, sdebug_unmap_granularity);
3341 static sector_t map_index_to_lba(unsigned long index)
3343 sector_t lba = index * sdebug_unmap_granularity;
3345 if (sdebug_unmap_alignment)
3346 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3350 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3354 unsigned int mapped;
3355 unsigned long index;
3358 index = lba_to_map_index(lba);
3359 mapped = test_bit(index, sip->map_storep);
3362 next = find_next_zero_bit(sip->map_storep, map_size, index);
3364 next = find_next_bit(sip->map_storep, map_size, index);
3366 end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next));
3371 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3374 sector_t end = lba + len;
3377 unsigned long index = lba_to_map_index(lba);
3379 if (index < map_size)
3380 set_bit(index, sip->map_storep);
3382 lba = map_index_to_lba(index + 1);
3386 static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
3389 sector_t end = lba + len;
3390 u8 *fsp = sip->storep;
3393 unsigned long index = lba_to_map_index(lba);
3395 if (lba == map_index_to_lba(index) &&
3396 lba + sdebug_unmap_granularity <= end &&
3398 clear_bit(index, sip->map_storep);
3399 if (sdebug_lbprz) { /* for LBPRZ=2 return 0xff_s */
3400 memset(fsp + lba * sdebug_sector_size,
3401 (sdebug_lbprz & 1) ? 0 : 0xff,
3402 sdebug_sector_size *
3403 sdebug_unmap_granularity);
3405 if (sip->dif_storep) {
3406 memset(sip->dif_storep + lba, 0xff,
3407 sizeof(*sip->dif_storep) *
3408 sdebug_unmap_granularity);
3411 lba = map_index_to_lba(index + 1);
3415 static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3422 struct sdeb_store_info *sip = devip2sip(devip, true);
3423 rwlock_t *macc_lckp = &sip->macc_lck;
3424 u8 *cmd = scp->cmnd;
3429 lba = get_unaligned_be64(cmd + 2);
3430 num = get_unaligned_be32(cmd + 10);
3435 lba = get_unaligned_be32(cmd + 2);
3436 num = get_unaligned_be16(cmd + 7);
3441 lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
3442 (u32)(cmd[1] & 0x1f) << 16;
3443 num = (0 == cmd[4]) ? 256 : cmd[4];
3448 lba = get_unaligned_be32(cmd + 2);
3449 num = get_unaligned_be32(cmd + 6);
3452 case 0x53: /* XDWRITEREAD(10) */
3454 lba = get_unaligned_be32(cmd + 2);
3455 num = get_unaligned_be16(cmd + 7);
3458 default: /* assume WRITE(32) */
3459 lba = get_unaligned_be64(cmd + 12);
3460 ei_lba = get_unaligned_be32(cmd + 20);
3461 num = get_unaligned_be32(cmd + 28);
3465 if (unlikely(have_dif_prot && check_prot)) {
3466 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3468 mk_sense_invalid_opcode(scp);
3469 return check_condition_result;
3471 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3472 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3473 (cmd[1] & 0xe0) == 0)
3474 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3478 write_lock(macc_lckp);
3479 ret = check_device_access_params(scp, lba, num, true);
3481 write_unlock(macc_lckp);
3486 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3487 switch (prot_verify_write(scp, lba, num, ei_lba)) {
3488 case 1: /* Guard tag error */
3489 if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
3490 write_unlock(macc_lckp);
3491 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3492 return illegal_condition_result;
3493 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3494 write_unlock(macc_lckp);
3495 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3496 return check_condition_result;
3499 case 3: /* Reference tag error */
3500 if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
3501 write_unlock(macc_lckp);
3502 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
3503 return illegal_condition_result;
3504 } else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
3505 write_unlock(macc_lckp);
3506 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
3507 return check_condition_result;
3513 ret = do_device_access(sip, scp, 0, lba, num, true);
3514 if (unlikely(scsi_debug_lbp()))
3515 map_region(sip, lba, num);
3516 /* If ZBC zone then bump its write pointer */
3517 if (sdebug_dev_is_zoned(devip))
3518 zbc_inc_wp(devip, lba, num);
3519 write_unlock(macc_lckp);
3520 if (unlikely(-1 == ret))
3521 return DID_ERROR << 16;
3522 else if (unlikely(sdebug_verbose &&
3523 (ret < (num * sdebug_sector_size))))
3524 sdev_printk(KERN_INFO, scp->device,
3525 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3526 my_name, num * sdebug_sector_size, ret);
3528 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3529 atomic_read(&sdeb_inject_pending))) {
3530 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3531 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3532 atomic_set(&sdeb_inject_pending, 0);
3533 return check_condition_result;
3534 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3535 /* Logical block guard check failed */
3536 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3537 atomic_set(&sdeb_inject_pending, 0);
3538 return illegal_condition_result;
3539 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3540 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3541 atomic_set(&sdeb_inject_pending, 0);
3542 return illegal_condition_result;
3549 * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
3550 * No READ GATHERED yet (requires bidi or long cdb holding gather list).
3552 static int resp_write_scat(struct scsi_cmnd *scp,
3553 struct sdebug_dev_info *devip)
3555 u8 *cmd = scp->cmnd;
3558 struct sdeb_store_info *sip = devip2sip(devip, true);
3559 rwlock_t *macc_lckp = &sip->macc_lck;
3561 u16 lbdof, num_lrd, k;
3562 u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
3563 u32 lb_size = sdebug_sector_size;
3568 static const u32 lrd_size = 32; /* + parameter list header size */
3570 if (cmd[0] == VARIABLE_LENGTH_CMD) {
3572 wrprotect = (cmd[10] >> 5) & 0x7;
3573 lbdof = get_unaligned_be16(cmd + 12);
3574 num_lrd = get_unaligned_be16(cmd + 16);
3575 bt_len = get_unaligned_be32(cmd + 28);
3576 } else { /* that leaves WRITE SCATTERED(16) */
3578 wrprotect = (cmd[2] >> 5) & 0x7;
3579 lbdof = get_unaligned_be16(cmd + 4);
3580 num_lrd = get_unaligned_be16(cmd + 8);
3581 bt_len = get_unaligned_be32(cmd + 10);
3582 if (unlikely(have_dif_prot)) {
3583 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3585 mk_sense_invalid_opcode(scp);
3586 return illegal_condition_result;
3588 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3589 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3591 sdev_printk(KERN_ERR, scp->device,
3592 "Unprotected WR to DIF device\n");
3595 if ((num_lrd == 0) || (bt_len == 0))
3596 return 0; /* T10 says these do-nothings are not errors */
3599 sdev_printk(KERN_INFO, scp->device,
3600 "%s: %s: LB Data Offset field bad\n",
3602 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3603 return illegal_condition_result;
3605 lbdof_blen = lbdof * lb_size;
3606 if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
3608 sdev_printk(KERN_INFO, scp->device,
3609 "%s: %s: LBA range descriptors don't fit\n",
3611 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3612 return illegal_condition_result;
3614 lrdp = kzalloc(lbdof_blen, GFP_ATOMIC);
3616 return SCSI_MLQUEUE_HOST_BUSY;
3618 sdev_printk(KERN_INFO, scp->device,
3619 "%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
3620 my_name, __func__, lbdof_blen);
3621 res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
3623 ret = DID_ERROR << 16;
3627 write_lock(macc_lckp);
3628 sg_off = lbdof_blen;
3629 /* Spec says Buffer xfer Length field in number of LBs in dout */
3631 for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
3632 lba = get_unaligned_be64(up + 0);
3633 num = get_unaligned_be32(up + 8);
3635 sdev_printk(KERN_INFO, scp->device,
3636 "%s: %s: k=%d LBA=0x%llx num=%u sg_off=%u\n",
3637 my_name, __func__, k, lba, num, sg_off);
3640 ret = check_device_access_params(scp, lba, num, true);
3642 goto err_out_unlock;
3643 num_by = num * lb_size;
3644 ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);
3646 if ((cum_lb + num) > bt_len) {
3648 sdev_printk(KERN_INFO, scp->device,
3649 "%s: %s: sum of blocks > data provided\n",
3651 mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
3653 ret = illegal_condition_result;
3654 goto err_out_unlock;
3658 if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
3659 int prot_ret = prot_verify_write(scp, lba, num,
3663 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
3665 ret = illegal_condition_result;
3666 goto err_out_unlock;
3670 ret = do_device_access(sip, scp, sg_off, lba, num, true);
3671 /* If ZBC zone then bump its write pointer */
3672 if (sdebug_dev_is_zoned(devip))
3673 zbc_inc_wp(devip, lba, num);
3674 if (unlikely(scsi_debug_lbp()))
3675 map_region(sip, lba, num);
3676 if (unlikely(-1 == ret)) {
3677 ret = DID_ERROR << 16;
3678 goto err_out_unlock;
3679 } else if (unlikely(sdebug_verbose && (ret < num_by)))
3680 sdev_printk(KERN_INFO, scp->device,
3681 "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
3682 my_name, num_by, ret);
3684 if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
3685 atomic_read(&sdeb_inject_pending))) {
3686 if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
3687 mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
3688 atomic_set(&sdeb_inject_pending, 0);
3689 ret = check_condition_result;
3690 goto err_out_unlock;
3691 } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
3692 /* Logical block guard check failed */
3693 mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
3694 atomic_set(&sdeb_inject_pending, 0);
3695 ret = illegal_condition_result;
3696 goto err_out_unlock;
3697 } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
3698 mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
3699 atomic_set(&sdeb_inject_pending, 0);
3700 ret = illegal_condition_result;
3701 goto err_out_unlock;
3709 write_unlock(macc_lckp);
3715 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3716 u32 ei_lba, bool unmap, bool ndob)
3718 struct scsi_device *sdp = scp->device;
3719 struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3720 unsigned long long i;
3722 u32 lb_size = sdebug_sector_size;
3724 struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
3725 scp->device->hostdata, true);
3726 rwlock_t *macc_lckp = &sip->macc_lck;
3730 write_lock(macc_lckp);
3732 ret = check_device_access_params(scp, lba, num, true);
3734 write_unlock(macc_lckp);
3738 if (unmap && scsi_debug_lbp()) {
3739 unmap_region(sip, lba, num);
3743 block = do_div(lbaa, sdebug_store_sectors);
3744 /* if ndob then zero 1 logical block, else fetch 1 logical block */
3746 fs1p = fsp + (block * lb_size);
3748 memset(fs1p, 0, lb_size);
3751 ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3754 write_unlock(&sip->macc_lck);
3755 return DID_ERROR << 16;
3756 } else if (sdebug_verbose && !ndob && (ret < lb_size))
3757 sdev_printk(KERN_INFO, scp->device,
3758 "%s: %s: lb size=%u, IO sent=%d bytes\n",
3759 my_name, "write same", lb_size, ret);
3761 /* Copy first sector to remaining blocks */
3762 for (i = 1 ; i < num ; i++) {
3764 block = do_div(lbaa, sdebug_store_sectors);
3765 memmove(fsp + (block * lb_size), fs1p, lb_size);
3767 if (scsi_debug_lbp())
3768 map_region(sip, lba, num);
3769 /* If ZBC zone then bump its write pointer */
3770 if (sdebug_dev_is_zoned(devip))
3771 zbc_inc_wp(devip, lba, num);
3773 write_unlock(macc_lckp);
3778 static int resp_write_same_10(struct scsi_cmnd *scp,
3779 struct sdebug_dev_info *devip)
3781 u8 *cmd = scp->cmnd;
3788 if (sdebug_lbpws10 == 0) {
3789 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3790 return check_condition_result;
3794 lba = get_unaligned_be32(cmd + 2);
3795 num = get_unaligned_be16(cmd + 7);
3796 if (num > sdebug_write_same_length) {
3797 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3798 return check_condition_result;
3800 return resp_write_same(scp, lba, num, ei_lba, unmap, false);
3803 static int resp_write_same_16(struct scsi_cmnd *scp,
3804 struct sdebug_dev_info *devip)
3806 u8 *cmd = scp->cmnd;
3813 if (cmd[1] & 0x8) { /* UNMAP */
3814 if (sdebug_lbpws == 0) {
3815 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
3816 return check_condition_result;
3820 if (cmd[1] & 0x1) /* NDOB (no data-out buffer, assumes zeroes) */
3822 lba = get_unaligned_be64(cmd + 2);
3823 num = get_unaligned_be32(cmd + 10);
3824 if (num > sdebug_write_same_length) {
3825 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
3826 return check_condition_result;
3828 return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
3831 /* Note the mode field is in the same position as the (lower) service action
3832 * field. For the Report supported operation codes command, SPC-4 suggests
3833 * each mode of this command should be reported separately; for future. */
3834 static int resp_write_buffer(struct scsi_cmnd *scp,
3835 struct sdebug_dev_info *devip)
3837 u8 *cmd = scp->cmnd;
3838 struct scsi_device *sdp = scp->device;
3839 struct sdebug_dev_info *dp;
3842 mode = cmd[1] & 0x1f;
3844 case 0x4: /* download microcode (MC) and activate (ACT) */
3845 /* set UAs on this device only */
3846 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
3847 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
3849 case 0x5: /* download MC, save and ACT */
3850 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
3852 case 0x6: /* download MC with offsets and ACT */
3853 /* set UAs on most devices (LUs) in this target */
3854 list_for_each_entry(dp,
3855 &devip->sdbg_host->dev_info_list,
3857 if (dp->target == sdp->id) {
3858 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
3860 set_bit(SDEBUG_UA_MICROCODE_CHANGED,
3864 case 0x7: /* download MC with offsets, save, and ACT */
3865 /* set UA on all devices (LUs) in this target */
3866 list_for_each_entry(dp,
3867 &devip->sdbg_host->dev_info_list,
3869 if (dp->target == sdp->id)
3870 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
3874 /* do nothing for this command for other mode values */
3880 static int resp_comp_write(struct scsi_cmnd *scp,
3881 struct sdebug_dev_info *devip)
3883 u8 *cmd = scp->cmnd;
3885 struct sdeb_store_info *sip = devip2sip(devip, true);
3886 rwlock_t *macc_lckp = &sip->macc_lck;
3889 u32 lb_size = sdebug_sector_size;
3894 lba = get_unaligned_be64(cmd + 2);
3895 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
3897 return 0; /* degenerate case, not an error */
3898 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3900 mk_sense_invalid_opcode(scp);
3901 return check_condition_result;
3903 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3904 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3905 (cmd[1] & 0xe0) == 0)
3906 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3908 ret = check_device_access_params(scp, lba, num, false);
3912 arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
3914 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3916 return check_condition_result;
3919 write_lock(macc_lckp);
3921 ret = do_dout_fetch(scp, dnum, arr);
3923 retval = DID_ERROR << 16;
3925 } else if (sdebug_verbose && (ret < (dnum * lb_size)))
3926 sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
3927 "indicated=%u, IO sent=%d bytes\n", my_name,
3928 dnum * lb_size, ret);
3929 if (!comp_write_worker(sip, lba, num, arr, false)) {
3930 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
3931 retval = check_condition_result;
3934 if (scsi_debug_lbp())
3935 map_region(sip, lba, num);
3937 write_unlock(macc_lckp);
3942 struct unmap_block_desc {
3948 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
3951 struct unmap_block_desc *desc;
3952 struct sdeb_store_info *sip = devip2sip(devip, true);
3953 rwlock_t *macc_lckp = &sip->macc_lck;
3954 unsigned int i, payload_len, descriptors;
3957 if (!scsi_debug_lbp())
3958 return 0; /* fib and say its done */
3959 payload_len = get_unaligned_be16(scp->cmnd + 7);
3960 BUG_ON(scsi_bufflen(scp) != payload_len);
3962 descriptors = (payload_len - 8) / 16;
3963 if (descriptors > sdebug_unmap_max_desc) {
3964 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
3965 return check_condition_result;
3968 buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
3970 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
3972 return check_condition_result;
3975 scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
3977 BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
3978 BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
3980 desc = (void *)&buf[8];
3982 write_lock(macc_lckp);
3984 for (i = 0 ; i < descriptors ; i++) {
3985 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
3986 unsigned int num = get_unaligned_be32(&desc[i].blocks);
3988 ret = check_device_access_params(scp, lba, num, true);
3992 unmap_region(sip, lba, num);
3998 write_unlock(macc_lckp);
4004 #define SDEBUG_GET_LBA_STATUS_LEN 32
4006 static int resp_get_lba_status(struct scsi_cmnd *scp,
4007 struct sdebug_dev_info *devip)
4009 u8 *cmd = scp->cmnd;
4011 u32 alloc_len, mapped, num;
4013 u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4015 lba = get_unaligned_be64(cmd + 2);
4016 alloc_len = get_unaligned_be32(cmd + 10);
4021 ret = check_device_access_params(scp, lba, 1, false);
4025 if (scsi_debug_lbp()) {
4026 struct sdeb_store_info *sip = devip2sip(devip, true);
4028 mapped = map_state(sip, lba, &num);
4031 /* following just in case virtual_gb changed */
4032 sdebug_capacity = get_sdebug_capacity();
4033 if (sdebug_capacity - lba <= 0xffffffff)
4034 num = sdebug_capacity - lba;
4039 memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4040 put_unaligned_be32(20, arr); /* Parameter Data Length */
4041 put_unaligned_be64(lba, arr + 8); /* LBA */
4042 put_unaligned_be32(num, arr + 16); /* Number of blocks */
4043 arr[20] = !mapped; /* prov_stat=0: mapped; 1: dealloc */
4045 return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4048 static int resp_sync_cache(struct scsi_cmnd *scp,
4049 struct sdebug_dev_info *devip)
4054 u8 *cmd = scp->cmnd;
4056 if (cmd[0] == SYNCHRONIZE_CACHE) { /* 10 byte cdb */
4057 lba = get_unaligned_be32(cmd + 2);
4058 num_blocks = get_unaligned_be16(cmd + 7);
4059 } else { /* SYNCHRONIZE_CACHE(16) */
4060 lba = get_unaligned_be64(cmd + 2);
4061 num_blocks = get_unaligned_be32(cmd + 10);
4063 if (lba + num_blocks > sdebug_capacity) {
4064 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4065 return check_condition_result;
4067 if (!write_since_sync || (cmd[1] & 0x2))
4068 res = SDEG_RES_IMMED_MASK;
4069 else /* delay if write_since_sync and IMMED clear */
4070 write_since_sync = false;
4075 * Assuming the LBA+num_blocks is not out-of-range, this function will return
4076 * CONDITION MET if the specified blocks will/have fitted in the cache, and
4077 * a GOOD status otherwise. Model a disk with a big cache and yield
4078 * CONDITION MET. Actually tries to bring range in main memory into the
4079 * cache associated with the CPU(s).
4081 static int resp_pre_fetch(struct scsi_cmnd *scp,
4082 struct sdebug_dev_info *devip)
4086 u64 block, rest = 0;
4088 u8 *cmd = scp->cmnd;
4089 struct sdeb_store_info *sip = devip2sip(devip, true);
4090 rwlock_t *macc_lckp = &sip->macc_lck;
4091 u8 *fsp = sip->storep;
4093 if (cmd[0] == PRE_FETCH) { /* 10 byte cdb */
4094 lba = get_unaligned_be32(cmd + 2);
4095 nblks = get_unaligned_be16(cmd + 7);
4096 } else { /* PRE-FETCH(16) */
4097 lba = get_unaligned_be64(cmd + 2);
4098 nblks = get_unaligned_be32(cmd + 10);
4100 if (lba + nblks > sdebug_capacity) {
4101 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4102 return check_condition_result;
4106 /* PRE-FETCH spec says nothing about LBP or PI so skip them */
4107 block = do_div(lba, sdebug_store_sectors);
4108 if (block + nblks > sdebug_store_sectors)
4109 rest = block + nblks - sdebug_store_sectors;
4111 /* Try to bring the PRE-FETCH range into CPU's cache */
4112 read_lock(macc_lckp);
4113 prefetch_range(fsp + (sdebug_sector_size * block),
4114 (nblks - rest) * sdebug_sector_size);
4116 prefetch_range(fsp, rest * sdebug_sector_size);
4117 read_unlock(macc_lckp);
4120 res = SDEG_RES_IMMED_MASK;
4121 return res | condition_met_result;
4124 #define RL_BUCKET_ELEMS 8
4126 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4127 * (W-LUN), the normal Linux scanning logic does not associate it with a
4128 * device (e.g. /dev/sg7). The following magic will make that association:
4129 * "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4130 * where <n> is a host number. If there are multiple targets in a host then
4131 * the above will associate a W-LUN to each target. To only get a W-LUN
4132 * for target 2, then use "echo '- 2 49409' > scan" .
4134 static int resp_report_luns(struct scsi_cmnd *scp,
4135 struct sdebug_dev_info *devip)
4137 unsigned char *cmd = scp->cmnd;
4138 unsigned int alloc_len;
4139 unsigned char select_report;
4141 struct scsi_lun *lun_p;
4142 u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
4143 unsigned int lun_cnt; /* normal LUN count (max: 256) */
4144 unsigned int wlun_cnt; /* report luns W-LUN count */
4145 unsigned int tlun_cnt; /* total LUN count */
4146 unsigned int rlen; /* response length (in bytes) */
4148 unsigned int off_rsp = 0;
4149 const int sz_lun = sizeof(struct scsi_lun);
4151 clear_luns_changed_on_target(devip);
4153 select_report = cmd[2];
4154 alloc_len = get_unaligned_be32(cmd + 6);
4156 if (alloc_len < 4) {
4157 pr_err("alloc len too small %d\n", alloc_len);
4158 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
4159 return check_condition_result;
4162 switch (select_report) {
4163 case 0: /* all LUNs apart from W-LUNs */
4164 lun_cnt = sdebug_max_luns;
4167 case 1: /* only W-LUNs */
4171 case 2: /* all LUNs */
4172 lun_cnt = sdebug_max_luns;
4175 case 0x10: /* only administrative LUs */
4176 case 0x11: /* see SPC-5 */
4177 case 0x12: /* only subsiduary LUs owned by referenced LU */
4179 pr_debug("select report invalid %d\n", select_report);
4180 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
4181 return check_condition_result;
4184 if (sdebug_no_lun_0 && (lun_cnt > 0))
4187 tlun_cnt = lun_cnt + wlun_cnt;
4188 rlen = tlun_cnt * sz_lun; /* excluding 8 byte header */
4189 scsi_set_resid(scp, scsi_bufflen(scp));
4190 pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
4191 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);
4193 /* loops rely on sizeof response header same as sizeof lun (both 8) */
4194 lun = sdebug_no_lun_0 ? 1 : 0;
4195 for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
4196 memset(arr, 0, sizeof(arr));
4197 lun_p = (struct scsi_lun *)&arr[0];
4199 put_unaligned_be32(rlen, &arr[0]);
4203 for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
4204 if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
4206 int_to_scsilun(lun++, lun_p);
4207 if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
4208 lun_p->scsi_lun[0] |= 0x40;
4210 if (j < RL_BUCKET_ELEMS)
4213 res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
4219 int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
4223 res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
4227 static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4229 bool is_bytchk3 = false;
4232 u32 vnum, a_num, off;
4233 const u32 lb_size = sdebug_sector_size;
4236 u8 *cmd = scp->cmnd;
4237 struct sdeb_store_info *sip = devip2sip(devip, true);
4238 rwlock_t *macc_lckp = &sip->macc_lck;
4240 bytchk = (cmd[1] >> 1) & 0x3;
4242 return 0; /* always claim internal verify okay */
4243 } else if (bytchk == 2) {
4244 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
4245 return check_condition_result;
4246 } else if (bytchk == 3) {
4247 is_bytchk3 = true; /* 1 block sent, compared repeatedly */
4251 lba = get_unaligned_be64(cmd + 2);
4252 vnum = get_unaligned_be32(cmd + 10);
4254 case VERIFY: /* is VERIFY(10) */
4255 lba = get_unaligned_be32(cmd + 2);
4256 vnum = get_unaligned_be16(cmd + 7);
4259 mk_sense_invalid_opcode(scp);
4260 return check_condition_result;
4263 return 0; /* not an error */
4264 a_num = is_bytchk3 ? 1 : vnum;
4265 /* Treat following check like one for read (i.e. no write) access */
4266 ret = check_device_access_params(scp, lba, a_num, false);
4270 arr = kcalloc(lb_size, vnum, GFP_ATOMIC);
4272 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4274 return check_condition_result;
4276 /* Not changing store, so only need read access */
4277 read_lock(macc_lckp);
4279 ret = do_dout_fetch(scp, a_num, arr);
4281 ret = DID_ERROR << 16;
4283 } else if (sdebug_verbose && (ret < (a_num * lb_size))) {
4284 sdev_printk(KERN_INFO, scp->device,
4285 "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
4286 my_name, __func__, a_num * lb_size, ret);
4289 for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
4290 memcpy(arr + off, arr, lb_size);
4293 if (!comp_write_worker(sip, lba, vnum, arr, true)) {
4294 mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
4295 ret = check_condition_result;
4299 read_unlock(macc_lckp);
4304 #define RZONES_DESC_HD 64
4306 /* Report zones depending on start LBA nad reporting options */
4307 static int resp_report_zones(struct scsi_cmnd *scp,
4308 struct sdebug_dev_info *devip)
4310 unsigned int i, max_zones, rep_max_zones, nrz = 0;
4312 u32 alloc_len, rep_opts, rep_len;
4315 u8 *arr = NULL, *desc;
4316 u8 *cmd = scp->cmnd;
4317 struct sdeb_zone_state *zsp;
4318 struct sdeb_store_info *sip = devip2sip(devip, false);
4319 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4321 if (!sdebug_dev_is_zoned(devip)) {
4322 mk_sense_invalid_opcode(scp);
4323 return check_condition_result;
4325 zs_lba = get_unaligned_be64(cmd + 2);
4326 alloc_len = get_unaligned_be32(cmd + 10);
4328 return 0; /* not an error */
4329 rep_opts = cmd[14] & 0x3f;
4330 partial = cmd[14] & 0x80;
4332 if (zs_lba >= sdebug_capacity) {
4333 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4334 return check_condition_result;
4337 max_zones = devip->nr_zones - (zs_lba >> devip->zsize_shift);
4338 rep_max_zones = min((alloc_len - 64) >> ilog2(RZONES_DESC_HD),
4341 arr = kcalloc(RZONES_DESC_HD, alloc_len, GFP_ATOMIC);
4343 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4345 return check_condition_result;
4348 read_lock(macc_lckp);
4351 for (i = 0; i < max_zones; i++) {
4352 lba = zs_lba + devip->zsize * i;
4353 if (lba > sdebug_capacity)
4355 zsp = zbc_zone(devip, lba);
4362 if (zsp->z_cond != ZC1_EMPTY)
4366 /* Implicit open zones */
4367 if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4371 /* Explicit open zones */
4372 if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4377 if (zsp->z_cond != ZC4_CLOSED)
4382 if (zsp->z_cond != ZC5_FULL)
4389 * Read-only, offline, reset WP recommended are
4390 * not emulated: no zones to report;
4394 /* non-seq-resource set */
4395 if (!zsp->z_non_seq_resource)
4399 /* Not write pointer (conventional) zones */
4400 if (!zbc_zone_is_conv(zsp))
4404 mk_sense_buffer(scp, ILLEGAL_REQUEST,
4405 INVALID_FIELD_IN_CDB, 0);
4406 ret = check_condition_result;
4410 if (nrz < rep_max_zones) {
4411 /* Fill zone descriptor */
4412 desc[0] = zsp->z_type;
4413 desc[1] = zsp->z_cond << 4;
4414 if (zsp->z_non_seq_resource)
4416 put_unaligned_be64((u64)zsp->z_size, desc + 8);
4417 put_unaligned_be64((u64)zsp->z_start, desc + 16);
4418 put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4422 if (partial && nrz >= rep_max_zones)
4429 put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
4430 put_unaligned_be64(sdebug_capacity - 1, arr + 8);
4432 rep_len = (unsigned long)desc - (unsigned long)arr;
4433 ret = fill_from_dev_buffer(scp, arr, min_t(int, alloc_len, rep_len));
4436 read_unlock(macc_lckp);
4441 /* Logic transplanted from tcmu-runner, file_zbc.c */
4442 static void zbc_open_all(struct sdebug_dev_info *devip)
4444 struct sdeb_zone_state *zsp = &devip->zstate[0];
4447 for (i = 0; i < devip->nr_zones; i++, zsp++) {
4448 if (zsp->z_cond == ZC4_CLOSED)
4449 zbc_open_zone(devip, &devip->zstate[i], true);
4453 static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4457 enum sdebug_z_cond zc;
4458 u8 *cmd = scp->cmnd;
4459 struct sdeb_zone_state *zsp;
4460 bool all = cmd[14] & 0x01;
4461 struct sdeb_store_info *sip = devip2sip(devip, false);
4462 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4464 if (!sdebug_dev_is_zoned(devip)) {
4465 mk_sense_invalid_opcode(scp);
4466 return check_condition_result;
4469 write_lock(macc_lckp);
4472 /* Check if all closed zones can be open */
4473 if (devip->max_open &&
4474 devip->nr_exp_open + devip->nr_closed > devip->max_open) {
4475 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4477 res = check_condition_result;
4480 /* Open all closed zones */
4481 zbc_open_all(devip);
4485 /* Open the specified zone */
4486 z_id = get_unaligned_be64(cmd + 2);
4487 if (z_id >= sdebug_capacity) {
4488 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4489 res = check_condition_result;
4493 zsp = zbc_zone(devip, z_id);
4494 if (z_id != zsp->z_start) {
4495 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4496 res = check_condition_result;
4499 if (zbc_zone_is_conv(zsp)) {
4500 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4501 res = check_condition_result;
4506 if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
4509 if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
4510 mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
4512 res = check_condition_result;
4516 zbc_open_zone(devip, zsp, true);
4518 write_unlock(macc_lckp);
4522 static void zbc_close_all(struct sdebug_dev_info *devip)
4526 for (i = 0; i < devip->nr_zones; i++)
4527 zbc_close_zone(devip, &devip->zstate[i]);
4530 static int resp_close_zone(struct scsi_cmnd *scp,
4531 struct sdebug_dev_info *devip)
4535 u8 *cmd = scp->cmnd;
4536 struct sdeb_zone_state *zsp;
4537 bool all = cmd[14] & 0x01;
4538 struct sdeb_store_info *sip = devip2sip(devip, false);
4539 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4541 if (!sdebug_dev_is_zoned(devip)) {
4542 mk_sense_invalid_opcode(scp);
4543 return check_condition_result;
4546 write_lock(macc_lckp);
4549 zbc_close_all(devip);
4553 /* Close specified zone */
4554 z_id = get_unaligned_be64(cmd + 2);
4555 if (z_id >= sdebug_capacity) {
4556 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4557 res = check_condition_result;
4561 zsp = zbc_zone(devip, z_id);
4562 if (z_id != zsp->z_start) {
4563 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4564 res = check_condition_result;
4567 if (zbc_zone_is_conv(zsp)) {
4568 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4569 res = check_condition_result;
4573 zbc_close_zone(devip, zsp);
4575 write_unlock(macc_lckp);
4579 static void zbc_finish_zone(struct sdebug_dev_info *devip,
4580 struct sdeb_zone_state *zsp, bool empty)
4582 enum sdebug_z_cond zc = zsp->z_cond;
4584 if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
4585 zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
4586 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4587 zbc_close_zone(devip, zsp);
4588 if (zsp->z_cond == ZC4_CLOSED)
4590 zsp->z_wp = zsp->z_start + zsp->z_size;
4591 zsp->z_cond = ZC5_FULL;
4595 static void zbc_finish_all(struct sdebug_dev_info *devip)
4599 for (i = 0; i < devip->nr_zones; i++)
4600 zbc_finish_zone(devip, &devip->zstate[i], false);
4603 static int resp_finish_zone(struct scsi_cmnd *scp,
4604 struct sdebug_dev_info *devip)
4606 struct sdeb_zone_state *zsp;
4609 u8 *cmd = scp->cmnd;
4610 bool all = cmd[14] & 0x01;
4611 struct sdeb_store_info *sip = devip2sip(devip, false);
4612 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4614 if (!sdebug_dev_is_zoned(devip)) {
4615 mk_sense_invalid_opcode(scp);
4616 return check_condition_result;
4619 write_lock(macc_lckp);
4622 zbc_finish_all(devip);
4626 /* Finish the specified zone */
4627 z_id = get_unaligned_be64(cmd + 2);
4628 if (z_id >= sdebug_capacity) {
4629 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4630 res = check_condition_result;
4634 zsp = zbc_zone(devip, z_id);
4635 if (z_id != zsp->z_start) {
4636 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4637 res = check_condition_result;
4640 if (zbc_zone_is_conv(zsp)) {
4641 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4642 res = check_condition_result;
4646 zbc_finish_zone(devip, zsp, true);
4648 write_unlock(macc_lckp);
4652 static void zbc_rwp_zone(struct sdebug_dev_info *devip,
4653 struct sdeb_zone_state *zsp)
4655 enum sdebug_z_cond zc;
4657 if (zbc_zone_is_conv(zsp))
4661 if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
4662 zbc_close_zone(devip, zsp);
4664 if (zsp->z_cond == ZC4_CLOSED)
4667 zsp->z_non_seq_resource = false;
4668 zsp->z_wp = zsp->z_start;
4669 zsp->z_cond = ZC1_EMPTY;
4672 static void zbc_rwp_all(struct sdebug_dev_info *devip)
4676 for (i = 0; i < devip->nr_zones; i++)
4677 zbc_rwp_zone(devip, &devip->zstate[i]);
4680 static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4682 struct sdeb_zone_state *zsp;
4685 u8 *cmd = scp->cmnd;
4686 bool all = cmd[14] & 0x01;
4687 struct sdeb_store_info *sip = devip2sip(devip, false);
4688 rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
4690 if (!sdebug_dev_is_zoned(devip)) {
4691 mk_sense_invalid_opcode(scp);
4692 return check_condition_result;
4695 write_lock(macc_lckp);
4702 z_id = get_unaligned_be64(cmd + 2);
4703 if (z_id >= sdebug_capacity) {
4704 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4705 res = check_condition_result;
4709 zsp = zbc_zone(devip, z_id);
4710 if (z_id != zsp->z_start) {
4711 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4712 res = check_condition_result;
4715 if (zbc_zone_is_conv(zsp)) {
4716 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
4717 res = check_condition_result;
4721 zbc_rwp_zone(devip, zsp);
4723 write_unlock(macc_lckp);
4727 static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
4730 u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4732 hwq = blk_mq_unique_tag_to_hwq(tag);
4734 pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
4735 if (WARN_ON_ONCE(hwq >= submit_queues))
4738 return sdebug_q_arr + hwq;
4741 static u32 get_tag(struct scsi_cmnd *cmnd)
4743 return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
4746 /* Queued (deferred) command completions converge here. */
4747 static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
4749 bool aborted = sd_dp->aborted;
4752 unsigned long iflags;
4753 struct sdebug_queue *sqp;
4754 struct sdebug_queued_cmd *sqcp;
4755 struct scsi_cmnd *scp;
4756 struct sdebug_dev_info *devip;
4758 if (unlikely(aborted))
4759 sd_dp->aborted = false;
4760 qc_idx = sd_dp->qc_idx;
4761 sqp = sdebug_q_arr + sd_dp->sqa_idx;
4762 if (sdebug_statistics) {
4763 atomic_inc(&sdebug_completions);
4764 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
4765 atomic_inc(&sdebug_miss_cpus);
4767 if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
4768 pr_err("wild qc_idx=%d\n", qc_idx);
4771 spin_lock_irqsave(&sqp->qc_lock, iflags);
4772 sd_dp->defer_t = SDEB_DEFER_NONE;
4773 sqcp = &sqp->qc_arr[qc_idx];
4775 if (unlikely(scp == NULL)) {
4776 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4777 pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
4778 sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
4781 devip = (struct sdebug_dev_info *)scp->device->hostdata;
4783 atomic_dec(&devip->num_in_q);
4785 pr_err("devip=NULL\n");
4786 if (unlikely(atomic_read(&retired_max_queue) > 0))
4789 sqcp->a_cmnd = NULL;
4790 if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
4791 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4792 pr_err("Unexpected completion\n");
4796 if (unlikely(retiring)) { /* user has reduced max_queue */
4799 retval = atomic_read(&retired_max_queue);
4800 if (qc_idx >= retval) {
4801 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4802 pr_err("index %d too large\n", retval);
4805 k = find_last_bit(sqp->in_use_bm, retval);
4806 if ((k < sdebug_max_queue) || (k == retval))
4807 atomic_set(&retired_max_queue, 0);
4809 atomic_set(&retired_max_queue, k + 1);
4811 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
4812 if (unlikely(aborted)) {
4814 pr_info("bypassing scsi_done() due to aborted cmd\n");
4817 scsi_done(scp); /* callback to mid level */
4820 /* When high resolution timer goes off this function is called. */
4821 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
4823 struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
4825 sdebug_q_cmd_complete(sd_dp);
4826 return HRTIMER_NORESTART;
4829 /* When work queue schedules work, it calls this function. */
4830 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
4832 struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
4834 sdebug_q_cmd_complete(sd_dp);
4837 static bool got_shared_uuid;
4838 static uuid_t shared_uuid;
4840 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
4842 struct sdeb_zone_state *zsp;
4843 sector_t capacity = get_sdebug_capacity();
4844 sector_t zstart = 0;
4848 * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
4849 * a zone size allowing for at least 4 zones on the device. Otherwise,
4850 * use the specified zone size checking that at least 2 zones can be
4851 * created for the device.
4853 if (!sdeb_zbc_zone_size_mb) {
4854 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
4855 >> ilog2(sdebug_sector_size);
4856 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
4858 if (devip->zsize < 2) {
4859 pr_err("Device capacity too small\n");
4863 if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
4864 pr_err("Zone size is not a power of 2\n");
4867 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
4868 >> ilog2(sdebug_sector_size);
4869 if (devip->zsize >= capacity) {
4870 pr_err("Zone size too large for device capacity\n");
4875 devip->zsize_shift = ilog2(devip->zsize);
4876 devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
4878 if (sdeb_zbc_nr_conv >= devip->nr_zones) {
4879 pr_err("Number of conventional zones too large\n");
4882 devip->nr_conv_zones = sdeb_zbc_nr_conv;
4884 if (devip->zmodel == BLK_ZONED_HM) {
4885 /* zbc_max_open_zones can be 0, meaning "not reported" */
4886 if (sdeb_zbc_max_open >= devip->nr_zones - 1)
4887 devip->max_open = (devip->nr_zones - 1) / 2;
4889 devip->max_open = sdeb_zbc_max_open;
4892 devip->zstate = kcalloc(devip->nr_zones,
4893 sizeof(struct sdeb_zone_state), GFP_KERNEL);
4897 for (i = 0; i < devip->nr_zones; i++) {
4898 zsp = &devip->zstate[i];
4900 zsp->z_start = zstart;
4902 if (i < devip->nr_conv_zones) {
4903 zsp->z_type = ZBC_ZONE_TYPE_CNV;
4904 zsp->z_cond = ZBC_NOT_WRITE_POINTER;
4905 zsp->z_wp = (sector_t)-1;
4907 if (devip->zmodel == BLK_ZONED_HM)
4908 zsp->z_type = ZBC_ZONE_TYPE_SWR;
4910 zsp->z_type = ZBC_ZONE_TYPE_SWP;
4911 zsp->z_cond = ZC1_EMPTY;
4912 zsp->z_wp = zsp->z_start;
4915 if (zsp->z_start + devip->zsize < capacity)
4916 zsp->z_size = devip->zsize;
4918 zsp->z_size = capacity - zsp->z_start;
4920 zstart += zsp->z_size;
4926 static struct sdebug_dev_info *sdebug_device_create(
4927 struct sdebug_host_info *sdbg_host, gfp_t flags)
4929 struct sdebug_dev_info *devip;
4931 devip = kzalloc(sizeof(*devip), flags);
4933 if (sdebug_uuid_ctl == 1)
4934 uuid_gen(&devip->lu_name);
4935 else if (sdebug_uuid_ctl == 2) {
4936 if (got_shared_uuid)
4937 devip->lu_name = shared_uuid;
4939 uuid_gen(&shared_uuid);
4940 got_shared_uuid = true;
4941 devip->lu_name = shared_uuid;
4944 devip->sdbg_host = sdbg_host;
4945 if (sdeb_zbc_in_use) {
4946 devip->zmodel = sdeb_zbc_model;
4947 if (sdebug_device_create_zones(devip)) {
4952 devip->zmodel = BLK_ZONED_NONE;
4954 devip->sdbg_host = sdbg_host;
4955 devip->create_ts = ktime_get_boottime();
4956 atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
4957 list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
4962 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
4964 struct sdebug_host_info *sdbg_host;
4965 struct sdebug_dev_info *open_devip = NULL;
4966 struct sdebug_dev_info *devip;
4968 sdbg_host = *(struct sdebug_host_info **)shost_priv(sdev->host);
4970 pr_err("Host info NULL\n");
4974 list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
4975 if ((devip->used) && (devip->channel == sdev->channel) &&
4976 (devip->target == sdev->id) &&
4977 (devip->lun == sdev->lun))
4980 if ((!devip->used) && (!open_devip))
4984 if (!open_devip) { /* try and make a new one */
4985 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
4987 pr_err("out of memory at line %d\n", __LINE__);
4992 open_devip->channel = sdev->channel;
4993 open_devip->target = sdev->id;
4994 open_devip->lun = sdev->lun;
4995 open_devip->sdbg_host = sdbg_host;
4996 atomic_set(&open_devip->num_in_q, 0);
4997 set_bit(SDEBUG_UA_POR, open_devip->uas_bm);
4998 open_devip->used = true;
5002 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5005 pr_info("slave_alloc <%u %u %u %llu>\n",
5006 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5010 static int scsi_debug_slave_configure(struct scsi_device *sdp)
5012 struct sdebug_dev_info *devip =
5013 (struct sdebug_dev_info *)sdp->hostdata;
5016 pr_info("slave_configure <%u %u %u %llu>\n",
5017 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5018 if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
5019 sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
5020 if (devip == NULL) {
5021 devip = find_build_dev_info(sdp);
5023 return 1; /* no resources, will be marked offline */
5025 sdp->hostdata = devip;
5027 sdp->no_uld_attach = 1;
5028 config_cdb_len(sdp);
5032 static void scsi_debug_slave_destroy(struct scsi_device *sdp)
5034 struct sdebug_dev_info *devip =
5035 (struct sdebug_dev_info *)sdp->hostdata;
5038 pr_info("slave_destroy <%u %u %u %llu>\n",
5039 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5041 /* make this slot available for re-use */
5042 devip->used = false;
5043 sdp->hostdata = NULL;
5047 static void stop_qc_helper(struct sdebug_defer *sd_dp,
5048 enum sdeb_defer_type defer_t)
5052 if (defer_t == SDEB_DEFER_HRT)
5053 hrtimer_cancel(&sd_dp->hrt);
5054 else if (defer_t == SDEB_DEFER_WQ)
5055 cancel_work_sync(&sd_dp->ew.work);
5058 /* If @cmnd found deletes its timer or work queue and returns true; else
5060 static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
5062 unsigned long iflags;
5063 int j, k, qmax, r_qmax;
5064 enum sdeb_defer_type l_defer_t;
5065 struct sdebug_queue *sqp;
5066 struct sdebug_queued_cmd *sqcp;
5067 struct sdebug_dev_info *devip;
5068 struct sdebug_defer *sd_dp;
5070 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5071 spin_lock_irqsave(&sqp->qc_lock, iflags);
5072 qmax = sdebug_max_queue;
5073 r_qmax = atomic_read(&retired_max_queue);
5076 for (k = 0; k < qmax; ++k) {
5077 if (test_bit(k, sqp->in_use_bm)) {
5078 sqcp = &sqp->qc_arr[k];
5079 if (cmnd != sqcp->a_cmnd)
5082 devip = (struct sdebug_dev_info *)
5083 cmnd->device->hostdata;
5085 atomic_dec(&devip->num_in_q);
5086 sqcp->a_cmnd = NULL;
5087 sd_dp = sqcp->sd_dp;
5089 l_defer_t = sd_dp->defer_t;
5090 sd_dp->defer_t = SDEB_DEFER_NONE;
5092 l_defer_t = SDEB_DEFER_NONE;
5093 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5094 stop_qc_helper(sd_dp, l_defer_t);
5095 clear_bit(k, sqp->in_use_bm);
5099 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5104 /* Deletes (stops) timers or work queues of all queued commands */
5105 static void stop_all_queued(void)
5107 unsigned long iflags;
5109 enum sdeb_defer_type l_defer_t;
5110 struct sdebug_queue *sqp;
5111 struct sdebug_queued_cmd *sqcp;
5112 struct sdebug_dev_info *devip;
5113 struct sdebug_defer *sd_dp;
5115 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5116 spin_lock_irqsave(&sqp->qc_lock, iflags);
5117 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5118 if (test_bit(k, sqp->in_use_bm)) {
5119 sqcp = &sqp->qc_arr[k];
5120 if (sqcp->a_cmnd == NULL)
5122 devip = (struct sdebug_dev_info *)
5123 sqcp->a_cmnd->device->hostdata;
5125 atomic_dec(&devip->num_in_q);
5126 sqcp->a_cmnd = NULL;
5127 sd_dp = sqcp->sd_dp;
5129 l_defer_t = sd_dp->defer_t;
5130 sd_dp->defer_t = SDEB_DEFER_NONE;
5132 l_defer_t = SDEB_DEFER_NONE;
5133 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5134 stop_qc_helper(sd_dp, l_defer_t);
5135 clear_bit(k, sqp->in_use_bm);
5136 spin_lock_irqsave(&sqp->qc_lock, iflags);
5139 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5143 /* Free queued command memory on heap */
5144 static void free_all_queued(void)
5147 struct sdebug_queue *sqp;
5148 struct sdebug_queued_cmd *sqcp;
5150 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5151 for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
5152 sqcp = &sqp->qc_arr[k];
5159 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5165 ok = stop_queued_cmnd(SCpnt);
5166 if (SCpnt->device && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5167 sdev_printk(KERN_INFO, SCpnt->device,
5168 "%s: command%s found\n", __func__,
5174 static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
5177 if (SCpnt && SCpnt->device) {
5178 struct scsi_device *sdp = SCpnt->device;
5179 struct sdebug_dev_info *devip =
5180 (struct sdebug_dev_info *)sdp->hostdata;
5182 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5183 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5185 set_bit(SDEBUG_UA_POR, devip->uas_bm);
5190 static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
5192 struct sdebug_host_info *sdbg_host;
5193 struct sdebug_dev_info *devip;
5194 struct scsi_device *sdp;
5195 struct Scsi_Host *hp;
5198 ++num_target_resets;
5201 sdp = SCpnt->device;
5204 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5205 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5209 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5211 list_for_each_entry(devip,
5212 &sdbg_host->dev_info_list,
5214 if (devip->target == sdp->id) {
5215 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5219 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5220 sdev_printk(KERN_INFO, sdp,
5221 "%s: %d device(s) found in target\n", __func__, k);
5226 static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
5228 struct sdebug_host_info *sdbg_host;
5229 struct sdebug_dev_info *devip;
5230 struct scsi_device *sdp;
5231 struct Scsi_Host *hp;
5235 if (!(SCpnt && SCpnt->device))
5237 sdp = SCpnt->device;
5238 if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5239 sdev_printk(KERN_INFO, sdp, "%s\n", __func__);
5242 sdbg_host = *(struct sdebug_host_info **)shost_priv(hp);
5244 list_for_each_entry(devip,
5245 &sdbg_host->dev_info_list,
5247 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5252 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5253 sdev_printk(KERN_INFO, sdp,
5254 "%s: %d device(s) found in host\n", __func__, k);
5259 static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
5261 struct sdebug_host_info *sdbg_host;
5262 struct sdebug_dev_info *devip;
5266 if ((SCpnt->device) && (SDEBUG_OPT_ALL_NOISE & sdebug_opts))
5267 sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
5268 spin_lock(&sdebug_host_list_lock);
5269 list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
5270 list_for_each_entry(devip, &sdbg_host->dev_info_list,
5272 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
5276 spin_unlock(&sdebug_host_list_lock);
5278 if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
5279 sdev_printk(KERN_INFO, SCpnt->device,
5280 "%s: %d device(s) found\n", __func__, k);
5284 static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
5286 struct msdos_partition *pp;
5287 int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
5288 int sectors_per_part, num_sectors, k;
5289 int heads_by_sects, start_sec, end_sec;
5291 /* assume partition table already zeroed */
5292 if ((sdebug_num_parts < 1) || (store_size < 1048576))
5294 if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
5295 sdebug_num_parts = SDEBUG_MAX_PARTS;
5296 pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
5298 num_sectors = (int)get_sdebug_capacity();
5299 sectors_per_part = (num_sectors - sdebug_sectors_per)
5301 heads_by_sects = sdebug_heads * sdebug_sectors_per;
5302 starts[0] = sdebug_sectors_per;
5303 max_part_secs = sectors_per_part;
5304 for (k = 1; k < sdebug_num_parts; ++k) {
5305 starts[k] = ((k * sectors_per_part) / heads_by_sects)
5307 if (starts[k] - starts[k - 1] < max_part_secs)
5308 max_part_secs = starts[k] - starts[k - 1];
5310 starts[sdebug_num_parts] = num_sectors;
5311 starts[sdebug_num_parts + 1] = 0;
5313 ramp[510] = 0x55; /* magic partition markings */
5315 pp = (struct msdos_partition *)(ramp + 0x1be);
5316 for (k = 0; starts[k + 1]; ++k, ++pp) {
5317 start_sec = starts[k];
5318 end_sec = starts[k] + max_part_secs - 1;
5321 pp->cyl = start_sec / heads_by_sects;
5322 pp->head = (start_sec - (pp->cyl * heads_by_sects))
5323 / sdebug_sectors_per;
5324 pp->sector = (start_sec % sdebug_sectors_per) + 1;
5326 pp->end_cyl = end_sec / heads_by_sects;
5327 pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
5328 / sdebug_sectors_per;
5329 pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
5331 pp->start_sect = cpu_to_le32(start_sec);
5332 pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
5333 pp->sys_ind = 0x83; /* plain Linux partition */
5337 static void block_unblock_all_queues(bool block)
5340 struct sdebug_queue *sqp;
5342 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
5343 atomic_set(&sqp->blocked, (int)block);
5346 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
5347 * commands will be processed normally before triggers occur.
5349 static void tweak_cmnd_count(void)
5353 modulo = abs(sdebug_every_nth);
5356 block_unblock_all_queues(true);
5357 count = atomic_read(&sdebug_cmnd_count);
5358 atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
5359 block_unblock_all_queues(false);
5362 static void clear_queue_stats(void)
5364 atomic_set(&sdebug_cmnd_count, 0);
5365 atomic_set(&sdebug_completions, 0);
5366 atomic_set(&sdebug_miss_cpus, 0);
5367 atomic_set(&sdebug_a_tsf, 0);
5370 static bool inject_on_this_cmd(void)
5372 if (sdebug_every_nth == 0)
5374 return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
5377 #define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
5379 /* Complete the processing of the thread that queued a SCSI command to this
5380 * driver. It either completes the command by calling cmnd_done() or
5381 * schedules a hr timer or work queue then returns 0. Returns
5382 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
5384 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
5386 int (*pfp)(struct scsi_cmnd *,
5387 struct sdebug_dev_info *),
5388 int delta_jiff, int ndelay)
5391 bool inject = false;
5392 bool hipri = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_HIPRI;
5393 int k, num_in_q, qdepth;
5394 unsigned long iflags;
5395 u64 ns_from_boot = 0;
5396 struct sdebug_queue *sqp;
5397 struct sdebug_queued_cmd *sqcp;
5398 struct scsi_device *sdp;
5399 struct sdebug_defer *sd_dp;
5401 if (unlikely(devip == NULL)) {
5402 if (scsi_result == 0)
5403 scsi_result = DID_NO_CONNECT << 16;
5404 goto respond_in_thread;
5408 if (delta_jiff == 0)
5409 goto respond_in_thread;
5411 sqp = get_queue(cmnd);
5412 spin_lock_irqsave(&sqp->qc_lock, iflags);
5413 if (unlikely(atomic_read(&sqp->blocked))) {
5414 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5415 return SCSI_MLQUEUE_HOST_BUSY;
5417 num_in_q = atomic_read(&devip->num_in_q);
5418 qdepth = cmnd->device->queue_depth;
5419 if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
5421 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5422 goto respond_in_thread;
5424 scsi_result = device_qfull_result;
5425 } else if (unlikely(sdebug_every_nth &&
5426 (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
5427 (scsi_result == 0))) {
5428 if ((num_in_q == (qdepth - 1)) &&
5429 (atomic_inc_return(&sdebug_a_tsf) >=
5430 abs(sdebug_every_nth))) {
5431 atomic_set(&sdebug_a_tsf, 0);
5433 scsi_result = device_qfull_result;
5437 k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
5438 if (unlikely(k >= sdebug_max_queue)) {
5439 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5441 goto respond_in_thread;
5442 else if (SDEBUG_OPT_ALL_TSF & sdebug_opts)
5443 scsi_result = device_qfull_result;
5444 if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
5445 sdev_printk(KERN_INFO, sdp,
5446 "%s: max_queue=%d exceeded, %s\n",
5447 __func__, sdebug_max_queue,
5448 (scsi_result ? "status: TASK SET FULL" :
5449 "report: host busy"));
5451 goto respond_in_thread;
5453 return SCSI_MLQUEUE_HOST_BUSY;
5455 set_bit(k, sqp->in_use_bm);
5456 atomic_inc(&devip->num_in_q);
5457 sqcp = &sqp->qc_arr[k];
5458 sqcp->a_cmnd = cmnd;
5459 cmnd->host_scribble = (unsigned char *)sqcp;
5460 sd_dp = sqcp->sd_dp;
5461 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5464 sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
5466 atomic_dec(&devip->num_in_q);
5467 clear_bit(k, sqp->in_use_bm);
5468 return SCSI_MLQUEUE_HOST_BUSY;
5475 /* Set the hostwide tag */
5476 if (sdebug_host_max_queue)
5477 sd_dp->hc_idx = get_tag(cmnd);
5480 ns_from_boot = ktime_get_boottime_ns();
5482 /* one of the resp_*() response functions is called here */
5483 cmnd->result = pfp ? pfp(cmnd, devip) : 0;
5484 if (cmnd->result & SDEG_RES_IMMED_MASK) {
5485 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5486 delta_jiff = ndelay = 0;
5488 if (cmnd->result == 0 && scsi_result != 0)
5489 cmnd->result = scsi_result;
5490 if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
5491 if (atomic_read(&sdeb_inject_pending)) {
5492 mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
5493 atomic_set(&sdeb_inject_pending, 0);
5494 cmnd->result = check_condition_result;
5498 if (unlikely(sdebug_verbose && cmnd->result))
5499 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
5500 __func__, cmnd->result);
5502 if (delta_jiff > 0 || ndelay > 0) {
5505 if (delta_jiff > 0) {
5506 u64 ns = jiffies_to_nsecs(delta_jiff);
5508 if (sdebug_random && ns < U32_MAX) {
5509 ns = prandom_u32_max((u32)ns);
5510 } else if (sdebug_random) {
5511 ns >>= 12; /* scale to 4 usec precision */
5512 if (ns < U32_MAX) /* over 4 hours max */
5513 ns = prandom_u32_max((u32)ns);
5516 kt = ns_to_ktime(ns);
5517 } else { /* ndelay has a 4.2 second max */
5518 kt = sdebug_random ? prandom_u32_max((u32)ndelay) :
5520 if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
5521 u64 d = ktime_get_boottime_ns() - ns_from_boot;
5523 if (kt <= d) { /* elapsed duration >= kt */
5524 spin_lock_irqsave(&sqp->qc_lock, iflags);
5525 sqcp->a_cmnd = NULL;
5526 atomic_dec(&devip->num_in_q);
5527 clear_bit(k, sqp->in_use_bm);
5528 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5531 /* call scsi_done() from this thread */
5535 /* otherwise reduce kt by elapsed time */
5540 sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
5541 spin_lock_irqsave(&sqp->qc_lock, iflags);
5542 if (!sd_dp->init_poll) {
5543 sd_dp->init_poll = true;
5544 sqcp->sd_dp = sd_dp;
5545 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5548 sd_dp->defer_t = SDEB_DEFER_POLL;
5549 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5551 if (!sd_dp->init_hrt) {
5552 sd_dp->init_hrt = true;
5553 sqcp->sd_dp = sd_dp;
5554 hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
5555 HRTIMER_MODE_REL_PINNED);
5556 sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
5557 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5560 sd_dp->defer_t = SDEB_DEFER_HRT;
5561 /* schedule the invocation of scsi_done() for a later time */
5562 hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
5564 if (sdebug_statistics)
5565 sd_dp->issuing_cpu = raw_smp_processor_id();
5566 } else { /* jdelay < 0, use work queue */
5567 if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
5568 atomic_read(&sdeb_inject_pending)))
5569 sd_dp->aborted = true;
5571 sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
5572 spin_lock_irqsave(&sqp->qc_lock, iflags);
5573 if (!sd_dp->init_poll) {
5574 sd_dp->init_poll = true;
5575 sqcp->sd_dp = sd_dp;
5576 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5579 sd_dp->defer_t = SDEB_DEFER_POLL;
5580 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
5582 if (!sd_dp->init_wq) {
5583 sd_dp->init_wq = true;
5584 sqcp->sd_dp = sd_dp;
5585 sd_dp->sqa_idx = sqp - sdebug_q_arr;
5587 INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
5589 sd_dp->defer_t = SDEB_DEFER_WQ;
5590 schedule_work(&sd_dp->ew.work);
5592 if (sdebug_statistics)
5593 sd_dp->issuing_cpu = raw_smp_processor_id();
5594 if (unlikely(sd_dp->aborted)) {
5595 sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
5596 scsi_cmd_to_rq(cmnd)->tag);
5597 blk_abort_request(scsi_cmd_to_rq(cmnd));
5598 atomic_set(&sdeb_inject_pending, 0);
5599 sd_dp->aborted = false;
5602 if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
5603 sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
5604 num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
5607 respond_in_thread: /* call back to mid-layer using invocation thread */
5608 cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
5609 cmnd->result &= ~SDEG_RES_IMMED_MASK;
5610 if (cmnd->result == 0 && scsi_result != 0)
5611 cmnd->result = scsi_result;
5616 /* Note: The following macros create attribute files in the
5617 /sys/module/scsi_debug/parameters directory. Unfortunately this
5618 driver is unaware of a change and cannot trigger auxiliary actions
5619 as it can when the corresponding attribute in the
5620 /sys/bus/pseudo/drivers/scsi_debug directory is changed.
5622 module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
5623 module_param_named(ato, sdebug_ato, int, S_IRUGO);
5624 module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
5625 module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
5626 module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
5627 module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
5628 module_param_named(dif, sdebug_dif, int, S_IRUGO);
5629 module_param_named(dix, sdebug_dix, int, S_IRUGO);
5630 module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
5631 module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
5632 module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
5633 module_param_named(guard, sdebug_guard, uint, S_IRUGO);
5634 module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
5635 module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
5636 module_param_string(inq_product, sdebug_inq_product_id,
5637 sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
5638 module_param_string(inq_rev, sdebug_inq_product_rev,
5639 sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
5640 module_param_string(inq_vendor, sdebug_inq_vendor_id,
5641 sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
5642 module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
5643 module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
5644 module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
5645 module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
5646 module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
5647 module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
5648 module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
5649 module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
5650 module_param_named(medium_error_count, sdebug_medium_error_count, int,
5652 module_param_named(medium_error_start, sdebug_medium_error_start, int,
5654 module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
5655 module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
5656 module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
5657 module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
5658 module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
5659 module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
5660 module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
5661 module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
5662 module_param_named(per_host_store, sdebug_per_host_store, bool,
5664 module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
5665 module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
5666 module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
5667 module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
5668 module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
5669 module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
5670 module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
5671 module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
5672 module_param_named(submit_queues, submit_queues, int, S_IRUGO);
5673 module_param_named(poll_queues, poll_queues, int, S_IRUGO);
5674 module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
5675 module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
5676 module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
5677 module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
5678 module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
5679 module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
5680 module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
5681 module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
5683 module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
5684 module_param_named(write_same_length, sdebug_write_same_length, int,
5686 module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
5687 module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
5688 module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
5689 module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
5691 MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
5692 MODULE_DESCRIPTION("SCSI debug adapter driver");
5693 MODULE_LICENSE("GPL");
5694 MODULE_VERSION(SDEBUG_VERSION);
5696 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
5697 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
5698 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
5699 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
5700 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
5701 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
5702 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
5703 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
5704 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
5705 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
5706 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
5707 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
5708 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
5709 MODULE_PARM_DESC(host_max_queue,
5710 "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
5711 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
5712 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
5713 SDEBUG_VERSION "\")");
5714 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
5715 MODULE_PARM_DESC(lbprz,
5716 "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
5717 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
5718 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
5719 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
5720 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
5721 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
5722 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
5723 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
5724 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
5725 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
5726 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
5727 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
5728 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
5729 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
5730 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
5731 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
5732 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
5733 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
5734 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
5735 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
5736 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
5737 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
5738 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
5739 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
5740 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
5741 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
5742 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
5743 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
5744 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
5745 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
5746 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
5747 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
5748 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
5749 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
5750 MODULE_PARM_DESC(uuid_ctl,
5751 "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
5752 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
5753 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
5754 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
5755 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
5756 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
5757 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
5758 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
5759 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
5761 #define SDEBUG_INFO_LEN 256
5762 static char sdebug_info[SDEBUG_INFO_LEN];
5764 static const char *scsi_debug_info(struct Scsi_Host *shp)
5768 k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
5769 my_name, SDEBUG_VERSION, sdebug_version_date);
5770 if (k >= (SDEBUG_INFO_LEN - 1))
5772 scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
5773 " dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
5774 sdebug_dev_size_mb, sdebug_opts, submit_queues,
5775 "statistics", (int)sdebug_statistics);
5779 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
5780 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
5785 int minLen = length > 15 ? 15 : length;
5787 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
5789 memcpy(arr, buffer, minLen);
5791 if (1 != sscanf(arr, "%d", &opts))
5794 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5795 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5796 if (sdebug_every_nth != 0)
5801 /* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
5802 * same for each scsi_debug host (if more than one). Some of the counters
5803 * output are not atomics so might be inaccurate in a busy system. */
5804 static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
5807 struct sdebug_queue *sqp;
5808 struct sdebug_host_info *sdhp;
5810 seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
5811 SDEBUG_VERSION, sdebug_version_date);
5812 seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
5813 sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
5814 sdebug_opts, sdebug_every_nth);
5815 seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
5816 sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
5817 sdebug_sector_size, "bytes");
5818 seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
5819 sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
5821 seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
5822 num_dev_resets, num_target_resets, num_bus_resets,
5824 seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
5825 dix_reads, dix_writes, dif_errors);
5826 seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
5828 seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
5829 atomic_read(&sdebug_cmnd_count),
5830 atomic_read(&sdebug_completions),
5831 "miss_cpus", atomic_read(&sdebug_miss_cpus),
5832 atomic_read(&sdebug_a_tsf),
5833 atomic_read(&sdeb_mq_poll_count));
5835 seq_printf(m, "submit_queues=%d\n", submit_queues);
5836 for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
5837 seq_printf(m, " queue %d:\n", j);
5838 f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
5839 if (f != sdebug_max_queue) {
5840 l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
5841 seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
5842 "first,last bits", f, l);
5846 seq_printf(m, "this host_no=%d\n", host->host_no);
5847 if (!xa_empty(per_store_ap)) {
5850 unsigned long l_idx;
5851 struct sdeb_store_info *sip;
5853 seq_puts(m, "\nhost list:\n");
5855 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5857 seq_printf(m, " %d: host_no=%d, si_idx=%d\n", j,
5858 sdhp->shost->host_no, idx);
5861 seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
5862 sdeb_most_recent_idx);
5864 xa_for_each(per_store_ap, l_idx, sip) {
5865 niu = xa_get_mark(per_store_ap, l_idx,
5866 SDEB_XA_NOT_IN_USE);
5868 seq_printf(m, " %d: idx=%d%s\n", j, idx,
5869 (niu ? " not_in_use" : ""));
5876 static ssize_t delay_show(struct device_driver *ddp, char *buf)
5878 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
5880 /* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
5881 * of delay is jiffies.
5883 static ssize_t delay_store(struct device_driver *ddp, const char *buf,
5888 if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
5890 if (sdebug_jdelay != jdelay) {
5892 struct sdebug_queue *sqp;
5894 block_unblock_all_queues(true);
5895 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5897 k = find_first_bit(sqp->in_use_bm,
5899 if (k != sdebug_max_queue) {
5900 res = -EBUSY; /* queued commands */
5905 sdebug_jdelay = jdelay;
5908 block_unblock_all_queues(false);
5914 static DRIVER_ATTR_RW(delay);
5916 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
5918 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
5920 /* Returns -EBUSY if ndelay is being changed and commands are queued */
5921 /* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
5922 static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
5927 if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
5928 (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
5930 if (sdebug_ndelay != ndelay) {
5932 struct sdebug_queue *sqp;
5934 block_unblock_all_queues(true);
5935 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
5937 k = find_first_bit(sqp->in_use_bm,
5939 if (k != sdebug_max_queue) {
5940 res = -EBUSY; /* queued commands */
5945 sdebug_ndelay = ndelay;
5946 sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
5949 block_unblock_all_queues(false);
5955 static DRIVER_ATTR_RW(ndelay);
5957 static ssize_t opts_show(struct device_driver *ddp, char *buf)
5959 return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
5962 static ssize_t opts_store(struct device_driver *ddp, const char *buf,
5968 if (sscanf(buf, "%10s", work) == 1) {
5969 if (strncasecmp(work, "0x", 2) == 0) {
5970 if (kstrtoint(work + 2, 16, &opts) == 0)
5973 if (kstrtoint(work, 10, &opts) == 0)
5980 sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
5981 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
5985 static DRIVER_ATTR_RW(opts);
5987 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
5989 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
5991 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
5996 /* Cannot change from or to TYPE_ZBC with sysfs */
5997 if (sdebug_ptype == TYPE_ZBC)
6000 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6008 static DRIVER_ATTR_RW(ptype);
6010 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6012 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6014 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6019 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6025 static DRIVER_ATTR_RW(dsense);
6027 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6029 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6031 static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
6036 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6037 bool want_store = (n == 0);
6038 struct sdebug_host_info *sdhp;
6041 sdebug_fake_rw = (sdebug_fake_rw > 0);
6042 if (sdebug_fake_rw == n)
6043 return count; /* not transitioning so do nothing */
6045 if (want_store) { /* 1 --> 0 transition, set up store */
6046 if (sdeb_first_idx < 0) {
6047 idx = sdebug_add_store();
6051 idx = sdeb_first_idx;
6052 xa_clear_mark(per_store_ap, idx,
6053 SDEB_XA_NOT_IN_USE);
6055 /* make all hosts use same store */
6056 list_for_each_entry(sdhp, &sdebug_host_list,
6058 if (sdhp->si_idx != idx) {
6059 xa_set_mark(per_store_ap, sdhp->si_idx,
6060 SDEB_XA_NOT_IN_USE);
6064 sdeb_most_recent_idx = idx;
6065 } else { /* 0 --> 1 transition is trigger for shrink */
6066 sdebug_erase_all_stores(true /* apart from first */);
6073 static DRIVER_ATTR_RW(fake_rw);
6075 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6077 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6079 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6084 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6085 sdebug_no_lun_0 = n;
6090 static DRIVER_ATTR_RW(no_lun_0);
6092 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6094 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6096 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6101 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6102 sdebug_num_tgts = n;
6103 sdebug_max_tgts_luns();
6108 static DRIVER_ATTR_RW(num_tgts);
6110 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6112 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6114 static DRIVER_ATTR_RO(dev_size_mb);
6116 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6118 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6121 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6126 if (kstrtobool(buf, &v))
6129 sdebug_per_host_store = v;
6132 static DRIVER_ATTR_RW(per_host_store);
6134 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6136 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6138 static DRIVER_ATTR_RO(num_parts);
6140 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6142 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6144 static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
6150 if (sscanf(buf, "%10s", work) == 1) {
6151 if (strncasecmp(work, "0x", 2) == 0) {
6152 if (kstrtoint(work + 2, 16, &nth) == 0)
6153 goto every_nth_done;
6155 if (kstrtoint(work, 10, &nth) == 0)
6156 goto every_nth_done;
6162 sdebug_every_nth = nth;
6163 if (nth && !sdebug_statistics) {
6164 pr_info("every_nth needs statistics=1, set it\n");
6165 sdebug_statistics = true;
6170 static DRIVER_ATTR_RW(every_nth);
6172 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6174 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6176 static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
6182 if (kstrtoint(buf, 0, &n))
6185 if (n > (int)SAM_LUN_AM_FLAT) {
6186 pr_warn("only LUN address methods 0 and 1 are supported\n");
6189 changed = ((int)sdebug_lun_am != n);
6191 if (changed && sdebug_scsi_level >= 5) { /* >= SPC-3 */
6192 struct sdebug_host_info *sdhp;
6193 struct sdebug_dev_info *dp;
6195 spin_lock(&sdebug_host_list_lock);
6196 list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6197 list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
6198 set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
6201 spin_unlock(&sdebug_host_list_lock);
6207 static DRIVER_ATTR_RW(lun_format);
6209 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6211 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6213 static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
6219 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6221 pr_warn("max_luns can be no more than 256\n");
6224 changed = (sdebug_max_luns != n);
6225 sdebug_max_luns = n;
6226 sdebug_max_tgts_luns();
6227 if (changed && (sdebug_scsi_level >= 5)) { /* >= SPC-3 */
6228 struct sdebug_host_info *sdhp;
6229 struct sdebug_dev_info *dp;
6231 spin_lock(&sdebug_host_list_lock);
6232 list_for_each_entry(sdhp, &sdebug_host_list,
6234 list_for_each_entry(dp, &sdhp->dev_info_list,
6236 set_bit(SDEBUG_UA_LUNS_CHANGED,
6240 spin_unlock(&sdebug_host_list_lock);
6246 static DRIVER_ATTR_RW(max_luns);
6248 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6250 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6252 /* N.B. max_queue can be changed while there are queued commands. In flight
6253 * commands beyond the new max_queue will be completed. */
6254 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6258 struct sdebug_queue *sqp;
6260 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6261 (n <= SDEBUG_CANQUEUE) &&
6262 (sdebug_host_max_queue == 0)) {
6263 block_unblock_all_queues(true);
6265 for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
6267 a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
6271 sdebug_max_queue = n;
6272 if (k == SDEBUG_CANQUEUE)
6273 atomic_set(&retired_max_queue, 0);
6275 atomic_set(&retired_max_queue, k + 1);
6277 atomic_set(&retired_max_queue, 0);
6278 block_unblock_all_queues(false);
6283 static DRIVER_ATTR_RW(max_queue);
6285 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6287 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6291 * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6292 * in range [0, sdebug_host_max_queue), we can't change it.
6294 static DRIVER_ATTR_RO(host_max_queue);
6296 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6298 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6300 static DRIVER_ATTR_RO(no_uld);
6302 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6304 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6306 static DRIVER_ATTR_RO(scsi_level);
6308 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6310 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6312 static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
6318 /* Ignore capacity change for ZBC drives for now */
6319 if (sdeb_zbc_in_use)
6322 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6323 changed = (sdebug_virtual_gb != n);
6324 sdebug_virtual_gb = n;
6325 sdebug_capacity = get_sdebug_capacity();
6327 struct sdebug_host_info *sdhp;
6328 struct sdebug_dev_info *dp;
6330 spin_lock(&sdebug_host_list_lock);
6331 list_for_each_entry(sdhp, &sdebug_host_list,
6333 list_for_each_entry(dp, &sdhp->dev_info_list,
6335 set_bit(SDEBUG_UA_CAPACITY_CHANGED,
6339 spin_unlock(&sdebug_host_list_lock);
6345 static DRIVER_ATTR_RW(virtual_gb);
6347 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
6349 /* absolute number of hosts currently active is what is shown */
6350 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
6353 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
6358 struct sdeb_store_info *sip;
6359 bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
6362 if (sscanf(buf, "%d", &delta_hosts) != 1)
6364 if (delta_hosts > 0) {
6368 xa_for_each_marked(per_store_ap, idx, sip,
6369 SDEB_XA_NOT_IN_USE) {
6370 sdeb_most_recent_idx = (int)idx;
6374 if (found) /* re-use case */
6375 sdebug_add_host_helper((int)idx);
6377 sdebug_do_add_host(true);
6379 sdebug_do_add_host(false);
6381 } while (--delta_hosts);
6382 } else if (delta_hosts < 0) {
6384 sdebug_do_remove_host(false);
6385 } while (++delta_hosts);
6389 static DRIVER_ATTR_RW(add_host);
6391 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
6393 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
6395 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
6400 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6401 sdebug_vpd_use_hostno = n;
6406 static DRIVER_ATTR_RW(vpd_use_hostno);
6408 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
6410 return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
6412 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
6417 if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
6419 sdebug_statistics = true;
6421 clear_queue_stats();
6422 sdebug_statistics = false;
6428 static DRIVER_ATTR_RW(statistics);
6430 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
6432 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
6434 static DRIVER_ATTR_RO(sector_size);
6436 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
6438 return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
6440 static DRIVER_ATTR_RO(submit_queues);
6442 static ssize_t dix_show(struct device_driver *ddp, char *buf)
6444 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
6446 static DRIVER_ATTR_RO(dix);
6448 static ssize_t dif_show(struct device_driver *ddp, char *buf)
6450 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
6452 static DRIVER_ATTR_RO(dif);
6454 static ssize_t guard_show(struct device_driver *ddp, char *buf)
6456 return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
6458 static DRIVER_ATTR_RO(guard);
6460 static ssize_t ato_show(struct device_driver *ddp, char *buf)
6462 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
6464 static DRIVER_ATTR_RO(ato);
6466 static ssize_t map_show(struct device_driver *ddp, char *buf)
6470 if (!scsi_debug_lbp())
6471 return scnprintf(buf, PAGE_SIZE, "0-%u\n",
6472 sdebug_store_sectors);
6474 if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
6475 struct sdeb_store_info *sip = xa_load(per_store_ap, 0);
6478 count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
6479 (int)map_size, sip->map_storep);
6481 buf[count++] = '\n';
6486 static DRIVER_ATTR_RO(map);
6488 static ssize_t random_show(struct device_driver *ddp, char *buf)
6490 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
6493 static ssize_t random_store(struct device_driver *ddp, const char *buf,
6498 if (kstrtobool(buf, &v))
6504 static DRIVER_ATTR_RW(random);
6506 static ssize_t removable_show(struct device_driver *ddp, char *buf)
6508 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
6510 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
6515 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6516 sdebug_removable = (n > 0);
6521 static DRIVER_ATTR_RW(removable);
6523 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
6525 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
6527 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
6528 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
6533 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6534 sdebug_host_lock = (n > 0);
6539 static DRIVER_ATTR_RW(host_lock);
6541 static ssize_t strict_show(struct device_driver *ddp, char *buf)
6543 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
6545 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
6550 if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6551 sdebug_strict = (n > 0);
6556 static DRIVER_ATTR_RW(strict);
6558 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
6560 return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
6562 static DRIVER_ATTR_RO(uuid_ctl);
6564 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
6566 return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
6568 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
6573 ret = kstrtoint(buf, 0, &n);
6577 all_config_cdb_len();
6580 static DRIVER_ATTR_RW(cdb_len);
6582 static const char * const zbc_model_strs_a[] = {
6583 [BLK_ZONED_NONE] = "none",
6584 [BLK_ZONED_HA] = "host-aware",
6585 [BLK_ZONED_HM] = "host-managed",
6588 static const char * const zbc_model_strs_b[] = {
6589 [BLK_ZONED_NONE] = "no",
6590 [BLK_ZONED_HA] = "aware",
6591 [BLK_ZONED_HM] = "managed",
6594 static const char * const zbc_model_strs_c[] = {
6595 [BLK_ZONED_NONE] = "0",
6596 [BLK_ZONED_HA] = "1",
6597 [BLK_ZONED_HM] = "2",
6600 static int sdeb_zbc_model_str(const char *cp)
6602 int res = sysfs_match_string(zbc_model_strs_a, cp);
6605 res = sysfs_match_string(zbc_model_strs_b, cp);
6607 res = sysfs_match_string(zbc_model_strs_c, cp);
6615 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
6617 return scnprintf(buf, PAGE_SIZE, "%s\n",
6618 zbc_model_strs_a[sdeb_zbc_model]);
6620 static DRIVER_ATTR_RO(zbc);
6622 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
6624 return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
6626 static DRIVER_ATTR_RO(tur_ms_to_ready);
6628 /* Note: The following array creates attribute files in the
6629 /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
6630 files (over those found in the /sys/module/scsi_debug/parameters
6631 directory) is that auxiliary actions can be triggered when an attribute
6632 is changed. For example see: add_host_store() above.
6635 static struct attribute *sdebug_drv_attrs[] = {
6636 &driver_attr_delay.attr,
6637 &driver_attr_opts.attr,
6638 &driver_attr_ptype.attr,
6639 &driver_attr_dsense.attr,
6640 &driver_attr_fake_rw.attr,
6641 &driver_attr_host_max_queue.attr,
6642 &driver_attr_no_lun_0.attr,
6643 &driver_attr_num_tgts.attr,
6644 &driver_attr_dev_size_mb.attr,
6645 &driver_attr_num_parts.attr,
6646 &driver_attr_every_nth.attr,
6647 &driver_attr_lun_format.attr,
6648 &driver_attr_max_luns.attr,
6649 &driver_attr_max_queue.attr,
6650 &driver_attr_no_uld.attr,
6651 &driver_attr_scsi_level.attr,
6652 &driver_attr_virtual_gb.attr,
6653 &driver_attr_add_host.attr,
6654 &driver_attr_per_host_store.attr,
6655 &driver_attr_vpd_use_hostno.attr,
6656 &driver_attr_sector_size.attr,
6657 &driver_attr_statistics.attr,
6658 &driver_attr_submit_queues.attr,
6659 &driver_attr_dix.attr,
6660 &driver_attr_dif.attr,
6661 &driver_attr_guard.attr,
6662 &driver_attr_ato.attr,
6663 &driver_attr_map.attr,
6664 &driver_attr_random.attr,
6665 &driver_attr_removable.attr,
6666 &driver_attr_host_lock.attr,
6667 &driver_attr_ndelay.attr,
6668 &driver_attr_strict.attr,
6669 &driver_attr_uuid_ctl.attr,
6670 &driver_attr_cdb_len.attr,
6671 &driver_attr_tur_ms_to_ready.attr,
6672 &driver_attr_zbc.attr,
6675 ATTRIBUTE_GROUPS(sdebug_drv);
6677 static struct device *pseudo_primary;
6679 static int __init scsi_debug_init(void)
6681 bool want_store = (sdebug_fake_rw == 0);
6683 int k, ret, hosts_to_add;
6686 ramdisk_lck_a[0] = &atomic_rw;
6687 ramdisk_lck_a[1] = &atomic_rw2;
6688 atomic_set(&retired_max_queue, 0);
6690 if (sdebug_ndelay >= 1000 * 1000 * 1000) {
6691 pr_warn("ndelay must be less than 1 second, ignored\n");
6693 } else if (sdebug_ndelay > 0)
6694 sdebug_jdelay = JDELAY_OVERRIDDEN;
6696 switch (sdebug_sector_size) {
6703 pr_err("invalid sector_size %d\n", sdebug_sector_size);
6707 switch (sdebug_dif) {
6708 case T10_PI_TYPE0_PROTECTION:
6710 case T10_PI_TYPE1_PROTECTION:
6711 case T10_PI_TYPE2_PROTECTION:
6712 case T10_PI_TYPE3_PROTECTION:
6713 have_dif_prot = true;
6717 pr_err("dif must be 0, 1, 2 or 3\n");
6721 if (sdebug_num_tgts < 0) {
6722 pr_err("num_tgts must be >= 0\n");
6726 if (sdebug_guard > 1) {
6727 pr_err("guard must be 0 or 1\n");
6731 if (sdebug_ato > 1) {
6732 pr_err("ato must be 0 or 1\n");
6736 if (sdebug_physblk_exp > 15) {
6737 pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
6741 sdebug_lun_am = sdebug_lun_am_i;
6742 if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
6743 pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
6744 sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
6747 if (sdebug_max_luns > 256) {
6748 if (sdebug_max_luns > 16384) {
6749 pr_warn("max_luns can be no more than 16384, use default\n");
6750 sdebug_max_luns = DEF_MAX_LUNS;
6752 sdebug_lun_am = SAM_LUN_AM_FLAT;
6755 if (sdebug_lowest_aligned > 0x3fff) {
6756 pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
6760 if (submit_queues < 1) {
6761 pr_err("submit_queues must be 1 or more\n");
6765 if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
6766 pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
6770 if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
6771 (sdebug_host_max_queue < 0)) {
6772 pr_err("host_max_queue must be in range [0 %d]\n",
6777 if (sdebug_host_max_queue &&
6778 (sdebug_max_queue != sdebug_host_max_queue)) {
6779 sdebug_max_queue = sdebug_host_max_queue;
6780 pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
6784 sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
6786 if (sdebug_q_arr == NULL)
6788 for (k = 0; k < submit_queues; ++k)
6789 spin_lock_init(&sdebug_q_arr[k].qc_lock);
6792 * check for host managed zoned block device specified with
6793 * ptype=0x14 or zbc=XXX.
6795 if (sdebug_ptype == TYPE_ZBC) {
6796 sdeb_zbc_model = BLK_ZONED_HM;
6797 } else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
6798 k = sdeb_zbc_model_str(sdeb_zbc_model_s);
6804 switch (sdeb_zbc_model) {
6805 case BLK_ZONED_NONE:
6807 sdebug_ptype = TYPE_DISK;
6810 sdebug_ptype = TYPE_ZBC;
6813 pr_err("Invalid ZBC model\n");
6818 if (sdeb_zbc_model != BLK_ZONED_NONE) {
6819 sdeb_zbc_in_use = true;
6820 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6821 sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
6824 if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
6825 sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
6826 if (sdebug_dev_size_mb < 1)
6827 sdebug_dev_size_mb = 1; /* force minimum 1 MB ramdisk */
6828 sz = (unsigned long)sdebug_dev_size_mb * 1048576;
6829 sdebug_store_sectors = sz / sdebug_sector_size;
6830 sdebug_capacity = get_sdebug_capacity();
6832 /* play around with geometry, don't waste too much on track 0 */
6834 sdebug_sectors_per = 32;
6835 if (sdebug_dev_size_mb >= 256)
6837 else if (sdebug_dev_size_mb >= 16)
6839 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6840 (sdebug_sectors_per * sdebug_heads);
6841 if (sdebug_cylinders_per >= 1024) {
6842 /* other LLDs do this; implies >= 1GB ram disk ... */
6844 sdebug_sectors_per = 63;
6845 sdebug_cylinders_per = (unsigned long)sdebug_capacity /
6846 (sdebug_sectors_per * sdebug_heads);
6848 if (scsi_debug_lbp()) {
6849 sdebug_unmap_max_blocks =
6850 clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);
6852 sdebug_unmap_max_desc =
6853 clamp(sdebug_unmap_max_desc, 0U, 256U);
6855 sdebug_unmap_granularity =
6856 clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);
6858 if (sdebug_unmap_alignment &&
6859 sdebug_unmap_granularity <=
6860 sdebug_unmap_alignment) {
6861 pr_err("ERR: unmap_granularity <= unmap_alignment\n");
6866 xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
6868 idx = sdebug_add_store();
6875 pseudo_primary = root_device_register("pseudo_0");
6876 if (IS_ERR(pseudo_primary)) {
6877 pr_warn("root_device_register() error\n");
6878 ret = PTR_ERR(pseudo_primary);
6881 ret = bus_register(&pseudo_lld_bus);
6883 pr_warn("bus_register error: %d\n", ret);
6886 ret = driver_register(&sdebug_driverfs_driver);
6888 pr_warn("driver_register error: %d\n", ret);
6892 hosts_to_add = sdebug_add_host;
6893 sdebug_add_host = 0;
6895 for (k = 0; k < hosts_to_add; k++) {
6896 if (want_store && k == 0) {
6897 ret = sdebug_add_host_helper(idx);
6899 pr_err("add_host_helper k=%d, error=%d\n",
6904 ret = sdebug_do_add_host(want_store &&
6905 sdebug_per_host_store);
6907 pr_err("add_host k=%d error=%d\n", k, -ret);
6913 pr_info("built %d host(s)\n", sdebug_num_hosts);
6918 bus_unregister(&pseudo_lld_bus);
6920 root_device_unregister(pseudo_primary);
6922 sdebug_erase_store(idx, NULL);
6924 kfree(sdebug_q_arr);
6928 static void __exit scsi_debug_exit(void)
6930 int k = sdebug_num_hosts;
6934 sdebug_do_remove_host(true);
6936 driver_unregister(&sdebug_driverfs_driver);
6937 bus_unregister(&pseudo_lld_bus);
6938 root_device_unregister(pseudo_primary);
6940 sdebug_erase_all_stores(false);
6941 xa_destroy(per_store_ap);
6942 kfree(sdebug_q_arr);
6945 device_initcall(scsi_debug_init);
6946 module_exit(scsi_debug_exit);
6948 static void sdebug_release_adapter(struct device *dev)
6950 struct sdebug_host_info *sdbg_host;
6952 sdbg_host = to_sdebug_host(dev);
6956 /* idx must be valid, if sip is NULL then it will be obtained using idx */
6957 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
6962 if (xa_empty(per_store_ap))
6964 sip = xa_load(per_store_ap, idx);
6968 vfree(sip->map_storep);
6969 vfree(sip->dif_storep);
6971 xa_erase(per_store_ap, idx);
6975 /* Assume apart_from_first==false only in shutdown case. */
6976 static void sdebug_erase_all_stores(bool apart_from_first)
6979 struct sdeb_store_info *sip = NULL;
6981 xa_for_each(per_store_ap, idx, sip) {
6982 if (apart_from_first)
6983 apart_from_first = false;
6985 sdebug_erase_store(idx, sip);
6987 if (apart_from_first)
6988 sdeb_most_recent_idx = sdeb_first_idx;
6992 * Returns store xarray new element index (idx) if >=0 else negated errno.
6993 * Limit the number of stores to 65536.
6995 static int sdebug_add_store(void)
6999 unsigned long iflags;
7000 unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7001 struct sdeb_store_info *sip = NULL;
7002 struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7004 sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7008 xa_lock_irqsave(per_store_ap, iflags);
7009 res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7010 if (unlikely(res < 0)) {
7011 xa_unlock_irqrestore(per_store_ap, iflags);
7013 pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7016 sdeb_most_recent_idx = n_idx;
7017 if (sdeb_first_idx < 0)
7018 sdeb_first_idx = n_idx;
7019 xa_unlock_irqrestore(per_store_ap, iflags);
7022 sip->storep = vzalloc(sz);
7024 pr_err("user data oom\n");
7027 if (sdebug_num_parts > 0)
7028 sdebug_build_parts(sip->storep, sz);
7030 /* DIF/DIX: what T10 calls Protection Information (PI) */
7034 dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7035 sip->dif_storep = vmalloc(dif_size);
7037 pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7040 if (!sip->dif_storep) {
7041 pr_err("DIX oom\n");
7044 memset(sip->dif_storep, 0xff, dif_size);
7046 /* Logical Block Provisioning */
7047 if (scsi_debug_lbp()) {
7048 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7049 sip->map_storep = vmalloc(array_size(sizeof(long),
7050 BITS_TO_LONGS(map_size)));
7052 pr_info("%lu provisioning blocks\n", map_size);
7054 if (!sip->map_storep) {
7055 pr_err("LBP map oom\n");
7059 bitmap_zero(sip->map_storep, map_size);
7061 /* Map first 1KB for partition table */
7062 if (sdebug_num_parts)
7063 map_region(sip, 0, 2);
7066 rwlock_init(&sip->macc_lck);
7069 sdebug_erase_store((int)n_idx, sip);
7070 pr_warn("%s: failed, errno=%d\n", __func__, -res);
7074 static int sdebug_add_host_helper(int per_host_idx)
7076 int k, devs_per_host, idx;
7077 int error = -ENOMEM;
7078 struct sdebug_host_info *sdbg_host;
7079 struct sdebug_dev_info *sdbg_devinfo, *tmp;
7081 sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
7084 idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
7085 if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
7086 xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7087 sdbg_host->si_idx = idx;
7089 INIT_LIST_HEAD(&sdbg_host->dev_info_list);
7091 devs_per_host = sdebug_num_tgts * sdebug_max_luns;
7092 for (k = 0; k < devs_per_host; k++) {
7093 sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
7098 spin_lock(&sdebug_host_list_lock);
7099 list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
7100 spin_unlock(&sdebug_host_list_lock);
7102 sdbg_host->dev.bus = &pseudo_lld_bus;
7103 sdbg_host->dev.parent = pseudo_primary;
7104 sdbg_host->dev.release = &sdebug_release_adapter;
7105 dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
7107 error = device_register(&sdbg_host->dev);
7115 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7117 list_del(&sdbg_devinfo->dev_list);
7118 kfree(sdbg_devinfo->zstate);
7119 kfree(sdbg_devinfo);
7122 pr_warn("%s: failed, errno=%d\n", __func__, -error);
7126 static int sdebug_do_add_host(bool mk_new_store)
7128 int ph_idx = sdeb_most_recent_idx;
7131 ph_idx = sdebug_add_store();
7135 return sdebug_add_host_helper(ph_idx);
7138 static void sdebug_do_remove_host(bool the_end)
7141 struct sdebug_host_info *sdbg_host = NULL;
7142 struct sdebug_host_info *sdbg_host2;
7144 spin_lock(&sdebug_host_list_lock);
7145 if (!list_empty(&sdebug_host_list)) {
7146 sdbg_host = list_entry(sdebug_host_list.prev,
7147 struct sdebug_host_info, host_list);
7148 idx = sdbg_host->si_idx;
7150 if (!the_end && idx >= 0) {
7153 list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
7154 if (sdbg_host2 == sdbg_host)
7156 if (idx == sdbg_host2->si_idx) {
7162 xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
7163 if (idx == sdeb_most_recent_idx)
7164 --sdeb_most_recent_idx;
7168 list_del(&sdbg_host->host_list);
7169 spin_unlock(&sdebug_host_list_lock);
7174 device_unregister(&sdbg_host->dev);
7178 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7181 struct sdebug_dev_info *devip;
7183 block_unblock_all_queues(true);
7184 devip = (struct sdebug_dev_info *)sdev->hostdata;
7185 if (NULL == devip) {
7186 block_unblock_all_queues(false);
7189 num_in_q = atomic_read(&devip->num_in_q);
7191 if (qdepth > SDEBUG_CANQUEUE) {
7192 qdepth = SDEBUG_CANQUEUE;
7193 pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7194 qdepth, SDEBUG_CANQUEUE);
7198 if (qdepth != sdev->queue_depth)
7199 scsi_change_queue_depth(sdev, qdepth);
7201 if (SDEBUG_OPT_Q_NOISE & sdebug_opts) {
7202 sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
7203 __func__, qdepth, num_in_q);
7205 block_unblock_all_queues(false);
7206 return sdev->queue_depth;
7209 static bool fake_timeout(struct scsi_cmnd *scp)
7211 if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7212 if (sdebug_every_nth < -1)
7213 sdebug_every_nth = -1;
7214 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7215 return true; /* ignore command causing timeout */
7216 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7217 scsi_medium_access_command(scp))
7218 return true; /* time out reads and writes */
7223 /* Response to TUR or media access command when device stopped */
7224 static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
7228 ktime_t now_ts = ktime_get_boottime();
7229 struct scsi_device *sdp = scp->device;
7231 stopped_state = atomic_read(&devip->stopped);
7232 if (stopped_state == 2) {
7233 if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
7234 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
7235 if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
7236 /* tur_ms_to_ready timer extinguished */
7237 atomic_set(&devip->stopped, 0);
7241 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
7243 sdev_printk(KERN_INFO, sdp,
7244 "%s: Not ready: in process of becoming ready\n", my_name);
7245 if (scp->cmnd[0] == TEST_UNIT_READY) {
7246 u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;
7248 if (diff_ns <= tur_nanosecs_to_ready)
7249 diff_ns = tur_nanosecs_to_ready - diff_ns;
7251 diff_ns = tur_nanosecs_to_ready;
7252 /* As per 20-061r2 approved for spc6 by T10 on 20200716 */
7253 do_div(diff_ns, 1000000); /* diff_ns becomes milliseconds */
7254 scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
7256 return check_condition_result;
7259 mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
7261 sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
7263 return check_condition_result;
7266 static int sdebug_map_queues(struct Scsi_Host *shost)
7270 if (shost->nr_hw_queues == 1)
7273 for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7274 struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7278 if (i == HCTX_TYPE_DEFAULT)
7279 map->nr_queues = submit_queues - poll_queues;
7280 else if (i == HCTX_TYPE_POLL)
7281 map->nr_queues = poll_queues;
7283 if (!map->nr_queues) {
7284 BUG_ON(i == HCTX_TYPE_DEFAULT);
7288 map->queue_offset = qoff;
7289 blk_mq_map_queues(map);
7291 qoff += map->nr_queues;
7298 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
7301 bool retiring = false;
7302 int num_entries = 0;
7303 unsigned int qc_idx = 0;
7304 unsigned long iflags;
7305 ktime_t kt_from_boot = ktime_get_boottime();
7306 struct sdebug_queue *sqp;
7307 struct sdebug_queued_cmd *sqcp;
7308 struct scsi_cmnd *scp;
7309 struct sdebug_dev_info *devip;
7310 struct sdebug_defer *sd_dp;
7312 sqp = sdebug_q_arr + queue_num;
7313 spin_lock_irqsave(&sqp->qc_lock, iflags);
7315 for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
7317 qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
7320 qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
7322 if (unlikely(qc_idx >= sdebug_max_queue))
7325 sqcp = &sqp->qc_arr[qc_idx];
7326 sd_dp = sqcp->sd_dp;
7327 if (unlikely(!sd_dp))
7330 if (unlikely(scp == NULL)) {
7331 pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
7332 queue_num, qc_idx, __func__);
7335 if (sd_dp->defer_t == SDEB_DEFER_POLL) {
7336 if (kt_from_boot < sd_dp->cmpl_ts)
7339 } else /* ignoring non REQ_HIPRI requests */
7341 devip = (struct sdebug_dev_info *)scp->device->hostdata;
7343 atomic_dec(&devip->num_in_q);
7345 pr_err("devip=NULL from %s\n", __func__);
7346 if (unlikely(atomic_read(&retired_max_queue) > 0))
7349 sqcp->a_cmnd = NULL;
7350 if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
7351 pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
7352 sqp, queue_num, qc_idx, __func__);
7355 if (unlikely(retiring)) { /* user has reduced max_queue */
7358 retval = atomic_read(&retired_max_queue);
7359 if (qc_idx >= retval) {
7360 pr_err("index %d too large\n", retval);
7363 k = find_last_bit(sqp->in_use_bm, retval);
7364 if ((k < sdebug_max_queue) || (k == retval))
7365 atomic_set(&retired_max_queue, 0);
7367 atomic_set(&retired_max_queue, k + 1);
7369 sd_dp->defer_t = SDEB_DEFER_NONE;
7370 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7371 scsi_done(scp); /* callback to mid level */
7372 spin_lock_irqsave(&sqp->qc_lock, iflags);
7375 spin_unlock_irqrestore(&sqp->qc_lock, iflags);
7376 if (num_entries > 0)
7377 atomic_add(num_entries, &sdeb_mq_poll_count);
7381 static int scsi_debug_queuecommand(struct Scsi_Host *shost,
7382 struct scsi_cmnd *scp)
7385 struct scsi_device *sdp = scp->device;
7386 const struct opcode_info_t *oip;
7387 const struct opcode_info_t *r_oip;
7388 struct sdebug_dev_info *devip;
7389 u8 *cmd = scp->cmnd;
7390 int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
7391 int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
7394 u64 lun_index = sdp->lun & 0x3FFF;
7401 scsi_set_resid(scp, 0);
7402 if (sdebug_statistics) {
7403 atomic_inc(&sdebug_cmnd_count);
7404 inject_now = inject_on_this_cmd();
7408 if (unlikely(sdebug_verbose &&
7409 !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
7414 sb = (int)sizeof(b);
7416 strcpy(b, "too long, over 32 bytes");
7418 for (k = 0, n = 0; k < len && n < sb; ++k)
7419 n += scnprintf(b + n, sb - n, "%02x ",
7422 sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
7423 blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
7425 if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
7426 return SCSI_MLQUEUE_HOST_BUSY;
7427 has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
7428 if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
7431 sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */
7432 oip = &opcode_info_arr[sdeb_i]; /* safe if table consistent */
7433 devip = (struct sdebug_dev_info *)sdp->hostdata;
7434 if (unlikely(!devip)) {
7435 devip = find_build_dev_info(sdp);
7439 if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
7440 atomic_set(&sdeb_inject_pending, 1);
7442 na = oip->num_attached;
7444 if (na) { /* multiple commands with this opcode */
7446 if (FF_SA & r_oip->flags) {
7447 if (F_SA_LOW & oip->flags)
7450 sa = get_unaligned_be16(cmd + 8);
7451 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7452 if (opcode == oip->opcode && sa == oip->sa)
7455 } else { /* since no service action only check opcode */
7456 for (k = 0; k <= na; oip = r_oip->arrp + k++) {
7457 if (opcode == oip->opcode)
7462 if (F_SA_LOW & r_oip->flags)
7463 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
7464 else if (F_SA_HIGH & r_oip->flags)
7465 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
7467 mk_sense_invalid_opcode(scp);
7470 } /* else (when na==0) we assume the oip is a match */
7472 if (unlikely(F_INV_OP & flags)) {
7473 mk_sense_invalid_opcode(scp);
7476 if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
7478 sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
7479 my_name, opcode, " supported for wlun");
7480 mk_sense_invalid_opcode(scp);
7483 if (unlikely(sdebug_strict)) { /* check cdb against mask */
7487 for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
7488 rem = ~oip->len_mask[k] & cmd[k];
7490 for (j = 7; j >= 0; --j, rem <<= 1) {
7494 mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
7499 if (unlikely(!(F_SKIP_UA & flags) &&
7500 find_first_bit(devip->uas_bm,
7501 SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
7502 errsts = make_ua(scp, devip);
7506 if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
7507 atomic_read(&devip->stopped))) {
7508 errsts = resp_not_ready(scp, devip);
7512 if (sdebug_fake_rw && (F_FAKE_RW & flags))
7514 if (unlikely(sdebug_every_nth)) {
7515 if (fake_timeout(scp))
7516 return 0; /* ignore command: make trouble */
7518 if (likely(oip->pfp))
7519 pfp = oip->pfp; /* calls a resp_* function */
7521 pfp = r_pfp; /* if leaf function ptr NULL, try the root's */
7524 if (F_DELAY_OVERR & flags) /* cmds like INQUIRY respond asap */
7525 return schedule_resp(scp, devip, errsts, pfp, 0, 0);
7526 else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
7527 sdebug_ndelay > 10000)) {
7529 * Skip long delays if ndelay <= 10 microseconds. Otherwise
7530 * for Start Stop Unit (SSU) want at least 1 second delay and
7531 * if sdebug_jdelay>1 want a long delay of that many seconds.
7532 * For Synchronize Cache want 1/20 of SSU's delay.
7534 int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
7535 int denom = (flags & F_SYNC_DELAY) ? 20 : 1;
7537 jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
7538 return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
7540 return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
7543 return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
7545 return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
7548 static struct scsi_host_template sdebug_driver_template = {
7549 .show_info = scsi_debug_show_info,
7550 .write_info = scsi_debug_write_info,
7551 .proc_name = sdebug_proc_name,
7552 .name = "SCSI DEBUG",
7553 .info = scsi_debug_info,
7554 .slave_alloc = scsi_debug_slave_alloc,
7555 .slave_configure = scsi_debug_slave_configure,
7556 .slave_destroy = scsi_debug_slave_destroy,
7557 .ioctl = scsi_debug_ioctl,
7558 .queuecommand = scsi_debug_queuecommand,
7559 .change_queue_depth = sdebug_change_qdepth,
7560 .map_queues = sdebug_map_queues,
7561 .mq_poll = sdebug_blk_mq_poll,
7562 .eh_abort_handler = scsi_debug_abort,
7563 .eh_device_reset_handler = scsi_debug_device_reset,
7564 .eh_target_reset_handler = scsi_debug_target_reset,
7565 .eh_bus_reset_handler = scsi_debug_bus_reset,
7566 .eh_host_reset_handler = scsi_debug_host_reset,
7567 .can_queue = SDEBUG_CANQUEUE,
7569 .sg_tablesize = SG_MAX_SEGMENTS,
7570 .cmd_per_lun = DEF_CMD_PER_LUN,
7572 .max_segment_size = -1U,
7573 .module = THIS_MODULE,
7574 .track_queue_depth = 1,
7577 static int sdebug_driver_probe(struct device *dev)
7580 struct sdebug_host_info *sdbg_host;
7581 struct Scsi_Host *hpnt;
7584 sdbg_host = to_sdebug_host(dev);
7586 sdebug_driver_template.can_queue = sdebug_max_queue;
7587 sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
7588 if (!sdebug_clustering)
7589 sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;
7591 hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));
7593 pr_err("scsi_host_alloc failed\n");
7597 if (submit_queues > nr_cpu_ids) {
7598 pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
7599 my_name, submit_queues, nr_cpu_ids);
7600 submit_queues = nr_cpu_ids;
7603 * Decide whether to tell scsi subsystem that we want mq. The
7604 * following should give the same answer for each host.
7606 hpnt->nr_hw_queues = submit_queues;
7607 if (sdebug_host_max_queue)
7608 hpnt->host_tagset = 1;
7610 /* poll queues are possible for nr_hw_queues > 1 */
7611 if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
7612 pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
7613 my_name, poll_queues, hpnt->nr_hw_queues);
7618 * Poll queues don't need interrupts, but we need at least one I/O queue
7619 * left over for non-polled I/O.
7620 * If condition not met, trim poll_queues to 1 (just for simplicity).
7622 if (poll_queues >= submit_queues) {
7623 if (submit_queues < 3)
7624 pr_warn("%s: trim poll_queues to 1\n", my_name);
7626 pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
7627 my_name, submit_queues - 1);
7633 sdbg_host->shost = hpnt;
7634 *((struct sdebug_host_info **)hpnt->hostdata) = sdbg_host;
7635 if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
7636 hpnt->max_id = sdebug_num_tgts + 1;
7638 hpnt->max_id = sdebug_num_tgts;
7639 /* = sdebug_max_luns; */
7640 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
7644 switch (sdebug_dif) {
7646 case T10_PI_TYPE1_PROTECTION:
7647 hprot = SHOST_DIF_TYPE1_PROTECTION;
7649 hprot |= SHOST_DIX_TYPE1_PROTECTION;
7652 case T10_PI_TYPE2_PROTECTION:
7653 hprot = SHOST_DIF_TYPE2_PROTECTION;
7655 hprot |= SHOST_DIX_TYPE2_PROTECTION;
7658 case T10_PI_TYPE3_PROTECTION:
7659 hprot = SHOST_DIF_TYPE3_PROTECTION;
7661 hprot |= SHOST_DIX_TYPE3_PROTECTION;
7666 hprot |= SHOST_DIX_TYPE0_PROTECTION;
7670 scsi_host_set_prot(hpnt, hprot);
7672 if (have_dif_prot || sdebug_dix)
7673 pr_info("host protection%s%s%s%s%s%s%s\n",
7674 (hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
7675 (hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
7676 (hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
7677 (hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
7678 (hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
7679 (hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
7680 (hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");
7682 if (sdebug_guard == 1)
7683 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
7685 scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);
7687 sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
7688 sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
7689 if (sdebug_every_nth) /* need stats counters for every_nth */
7690 sdebug_statistics = true;
7691 error = scsi_add_host(hpnt, &sdbg_host->dev);
7693 pr_err("scsi_add_host failed\n");
7695 scsi_host_put(hpnt);
7697 scsi_scan_host(hpnt);
7703 static void sdebug_driver_remove(struct device *dev)
7705 struct sdebug_host_info *sdbg_host;
7706 struct sdebug_dev_info *sdbg_devinfo, *tmp;
7708 sdbg_host = to_sdebug_host(dev);
7710 scsi_remove_host(sdbg_host->shost);
7712 list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
7714 list_del(&sdbg_devinfo->dev_list);
7715 kfree(sdbg_devinfo->zstate);
7716 kfree(sdbg_devinfo);
7719 scsi_host_put(sdbg_host->shost);
7722 static int pseudo_lld_bus_match(struct device *dev,
7723 struct device_driver *dev_driver)
7728 static struct bus_type pseudo_lld_bus = {
7730 .match = pseudo_lld_bus_match,
7731 .probe = sdebug_driver_probe,
7732 .remove = sdebug_driver_remove,
7733 .drv_groups = sdebug_drv_groups,