]> Git Repo - J-linux.git/blob - drivers/scsi/scsi_debug.c
scsi: scsi_debug: Implement GET STREAM STATUS
[J-linux.git] / drivers / scsi / scsi_debug.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * vvvvvvvvvvvvvvvvvvvvvvv Original vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
4  *  Copyright (C) 1992  Eric Youngdale
5  *  Simulate a host adapter with 2 disks attached.  Do a lot of checking
6  *  to make sure that we are not getting blocks mixed up, and PANIC if
7  *  anything out of the ordinary is seen.
8  * ^^^^^^^^^^^^^^^^^^^^^^^ Original ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9  *
10  * Copyright (C) 2001 - 2021 Douglas Gilbert
11  *
12  *  For documentation see http://sg.danny.cz/sg/scsi_debug.html
13  */
14
15
16 #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
17
18 #include <linux/module.h>
19 #include <linux/align.h>
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/fs.h>
27 #include <linux/init.h>
28 #include <linux/proc_fs.h>
29 #include <linux/vmalloc.h>
30 #include <linux/moduleparam.h>
31 #include <linux/scatterlist.h>
32 #include <linux/blkdev.h>
33 #include <linux/crc-t10dif.h>
34 #include <linux/spinlock.h>
35 #include <linux/interrupt.h>
36 #include <linux/atomic.h>
37 #include <linux/hrtimer.h>
38 #include <linux/uuid.h>
39 #include <linux/t10-pi.h>
40 #include <linux/msdos_partition.h>
41 #include <linux/random.h>
42 #include <linux/xarray.h>
43 #include <linux/prefetch.h>
44 #include <linux/debugfs.h>
45 #include <linux/async.h>
46 #include <linux/cleanup.h>
47
48 #include <net/checksum.h>
49
50 #include <asm/unaligned.h>
51
52 #include <scsi/scsi.h>
53 #include <scsi/scsi_cmnd.h>
54 #include <scsi/scsi_device.h>
55 #include <scsi/scsi_host.h>
56 #include <scsi/scsicam.h>
57 #include <scsi/scsi_eh.h>
58 #include <scsi/scsi_tcq.h>
59 #include <scsi/scsi_dbg.h>
60
61 #include "sd.h"
62 #include "scsi_logging.h"
63
64 /* make sure inq_product_rev string corresponds to this version */
65 #define SDEBUG_VERSION "0191"   /* format to fit INQUIRY revision field */
66 static const char *sdebug_version_date = "20210520";
67
68 #define MY_NAME "scsi_debug"
69
70 /* Additional Sense Code (ASC) */
71 #define NO_ADDITIONAL_SENSE 0x0
72 #define LOGICAL_UNIT_NOT_READY 0x4
73 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
74 #define UNRECOVERED_READ_ERR 0x11
75 #define PARAMETER_LIST_LENGTH_ERR 0x1a
76 #define INVALID_OPCODE 0x20
77 #define LBA_OUT_OF_RANGE 0x21
78 #define INVALID_FIELD_IN_CDB 0x24
79 #define INVALID_FIELD_IN_PARAM_LIST 0x26
80 #define WRITE_PROTECTED 0x27
81 #define UA_RESET_ASC 0x29
82 #define UA_CHANGED_ASC 0x2a
83 #define TARGET_CHANGED_ASC 0x3f
84 #define LUNS_CHANGED_ASCQ 0x0e
85 #define INSUFF_RES_ASC 0x55
86 #define INSUFF_RES_ASCQ 0x3
87 #define POWER_ON_RESET_ASCQ 0x0
88 #define POWER_ON_OCCURRED_ASCQ 0x1
89 #define BUS_RESET_ASCQ 0x2      /* scsi bus reset occurred */
90 #define MODE_CHANGED_ASCQ 0x1   /* mode parameters changed */
91 #define CAPACITY_CHANGED_ASCQ 0x9
92 #define SAVING_PARAMS_UNSUP 0x39
93 #define TRANSPORT_PROBLEM 0x4b
94 #define THRESHOLD_EXCEEDED 0x5d
95 #define LOW_POWER_COND_ON 0x5e
96 #define MISCOMPARE_VERIFY_ASC 0x1d
97 #define MICROCODE_CHANGED_ASCQ 0x1      /* with TARGET_CHANGED_ASC */
98 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
99 #define WRITE_ERROR_ASC 0xc
100 #define UNALIGNED_WRITE_ASCQ 0x4
101 #define WRITE_BOUNDARY_ASCQ 0x5
102 #define READ_INVDATA_ASCQ 0x6
103 #define READ_BOUNDARY_ASCQ 0x7
104 #define ATTEMPT_ACCESS_GAP 0x9
105 #define INSUFF_ZONE_ASCQ 0xe
106
107 /* Additional Sense Code Qualifier (ASCQ) */
108 #define ACK_NAK_TO 0x3
109
110 /* Default values for driver parameters */
111 #define DEF_NUM_HOST   1
112 #define DEF_NUM_TGTS   1
113 #define DEF_MAX_LUNS   1
114 /* With these defaults, this driver will make 1 host with 1 target
115  * (id 0) containing 1 logical unit (lun 0). That is 1 device.
116  */
117 #define DEF_ATO 1
118 #define DEF_CDB_LEN 10
119 #define DEF_JDELAY   1          /* if > 0 unit is a jiffy */
120 #define DEF_DEV_SIZE_PRE_INIT   0
121 #define DEF_DEV_SIZE_MB   8
122 #define DEF_ZBC_DEV_SIZE_MB   128
123 #define DEF_DIF 0
124 #define DEF_DIX 0
125 #define DEF_PER_HOST_STORE false
126 #define DEF_D_SENSE   0
127 #define DEF_EVERY_NTH   0
128 #define DEF_FAKE_RW     0
129 #define DEF_GUARD 0
130 #define DEF_HOST_LOCK 0
131 #define DEF_LBPU 0
132 #define DEF_LBPWS 0
133 #define DEF_LBPWS10 0
134 #define DEF_LBPRZ 1
135 #define DEF_LOWEST_ALIGNED 0
136 #define DEF_NDELAY   0          /* if > 0 unit is a nanosecond */
137 #define DEF_NO_LUN_0   0
138 #define DEF_NUM_PARTS   0
139 #define DEF_OPTS   0
140 #define DEF_OPT_BLKS 1024
141 #define DEF_PHYSBLK_EXP 0
142 #define DEF_OPT_XFERLEN_EXP 0
143 #define DEF_PTYPE   TYPE_DISK
144 #define DEF_RANDOM false
145 #define DEF_REMOVABLE false
146 #define DEF_SCSI_LEVEL   7    /* INQUIRY, byte2 [6->SPC-4; 7->SPC-5] */
147 #define DEF_SECTOR_SIZE 512
148 #define DEF_UNMAP_ALIGNMENT 0
149 #define DEF_UNMAP_GRANULARITY 1
150 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
151 #define DEF_UNMAP_MAX_DESC 256
152 #define DEF_VIRTUAL_GB   0
153 #define DEF_VPD_USE_HOSTNO 1
154 #define DEF_WRITESAME_LENGTH 0xFFFF
155 #define DEF_STRICT 0
156 #define DEF_STATISTICS false
157 #define DEF_SUBMIT_QUEUES 1
158 #define DEF_TUR_MS_TO_READY 0
159 #define DEF_UUID_CTL 0
160 #define JDELAY_OVERRIDDEN -9999
161
162 /* Default parameters for ZBC drives */
163 #define DEF_ZBC_ZONE_SIZE_MB    128
164 #define DEF_ZBC_MAX_OPEN_ZONES  8
165 #define DEF_ZBC_NR_CONV_ZONES   1
166
167 #define SDEBUG_LUN_0_VAL 0
168
169 /* bit mask values for sdebug_opts */
170 #define SDEBUG_OPT_NOISE                1
171 #define SDEBUG_OPT_MEDIUM_ERR           2
172 #define SDEBUG_OPT_TIMEOUT              4
173 #define SDEBUG_OPT_RECOVERED_ERR        8
174 #define SDEBUG_OPT_TRANSPORT_ERR        16
175 #define SDEBUG_OPT_DIF_ERR              32
176 #define SDEBUG_OPT_DIX_ERR              64
177 #define SDEBUG_OPT_MAC_TIMEOUT          128
178 #define SDEBUG_OPT_SHORT_TRANSFER       0x100
179 #define SDEBUG_OPT_Q_NOISE              0x200
180 #define SDEBUG_OPT_ALL_TSF              0x400   /* ignore */
181 #define SDEBUG_OPT_RARE_TSF             0x800
182 #define SDEBUG_OPT_N_WCE                0x1000
183 #define SDEBUG_OPT_RESET_NOISE          0x2000
184 #define SDEBUG_OPT_NO_CDB_NOISE         0x4000
185 #define SDEBUG_OPT_HOST_BUSY            0x8000
186 #define SDEBUG_OPT_CMD_ABORT            0x10000
187 #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \
188                               SDEBUG_OPT_RESET_NOISE)
189 #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \
190                                   SDEBUG_OPT_TRANSPORT_ERR | \
191                                   SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \
192                                   SDEBUG_OPT_SHORT_TRANSFER | \
193                                   SDEBUG_OPT_HOST_BUSY | \
194                                   SDEBUG_OPT_CMD_ABORT)
195 #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
196                                   SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
197
198 /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
199  * priority order. In the subset implemented here lower numbers have higher
200  * priority. The UA numbers should be a sequence starting from 0 with
201  * SDEBUG_NUM_UAS being 1 higher than the highest numbered UA. */
202 #define SDEBUG_UA_POR 0         /* Power on, reset, or bus device reset */
203 #define SDEBUG_UA_POOCCUR 1     /* Power on occurred */
204 #define SDEBUG_UA_BUS_RESET 2
205 #define SDEBUG_UA_MODE_CHANGED 3
206 #define SDEBUG_UA_CAPACITY_CHANGED 4
207 #define SDEBUG_UA_LUNS_CHANGED 5
208 #define SDEBUG_UA_MICROCODE_CHANGED 6   /* simulate firmware change */
209 #define SDEBUG_UA_MICROCODE_CHANGED_WO_RESET 7
210 #define SDEBUG_NUM_UAS 8
211
212 /* when 1==SDEBUG_OPT_MEDIUM_ERR, a medium error is simulated at this
213  * sector on read commands: */
214 #define OPT_MEDIUM_ERR_ADDR   0x1234 /* that's sector 4660 in decimal */
215 #define OPT_MEDIUM_ERR_NUM    10     /* number of consecutive medium errs */
216
217 /* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
218  * (for response) per submit queue at one time. Can be reduced by max_queue
219  * option. Command responses are not queued when jdelay=0 and ndelay=0. The
220  * per-device DEF_CMD_PER_LUN can be changed via sysfs:
221  * /sys/class/scsi_device/<h:c:t:l>/device/queue_depth
222  * but cannot exceed SDEBUG_CANQUEUE .
223  */
224 #define SDEBUG_CANQUEUE_WORDS  3        /* a WORD is bits in a long */
225 #define SDEBUG_CANQUEUE  (SDEBUG_CANQUEUE_WORDS * BITS_PER_LONG)
226 #define DEF_CMD_PER_LUN  SDEBUG_CANQUEUE
227
228 /* UA - Unit Attention; SA - Service Action; SSU - Start Stop Unit */
229 #define F_D_IN                  1       /* Data-in command (e.g. READ) */
230 #define F_D_OUT                 2       /* Data-out command (e.g. WRITE) */
231 #define F_D_OUT_MAYBE           4       /* WRITE SAME, NDOB bit */
232 #define F_D_UNKN                8
233 #define F_RL_WLUN_OK            0x10    /* allowed with REPORT LUNS W-LUN */
234 #define F_SKIP_UA               0x20    /* bypass UAs (e.g. INQUIRY command) */
235 #define F_DELAY_OVERR           0x40    /* for commands like INQUIRY */
236 #define F_SA_LOW                0x80    /* SA is in cdb byte 1, bits 4 to 0 */
237 #define F_SA_HIGH               0x100   /* SA is in cdb bytes 8 and 9 */
238 #define F_INV_OP                0x200   /* invalid opcode (not supported) */
239 #define F_FAKE_RW               0x400   /* bypass resp_*() when fake_rw set */
240 #define F_M_ACCESS              0x800   /* media access, reacts to SSU state */
241 #define F_SSU_DELAY             0x1000  /* SSU command delay (long-ish) */
242 #define F_SYNC_DELAY            0x2000  /* SYNCHRONIZE CACHE delay */
243
244 /* Useful combinations of the above flags */
245 #define FF_RESPOND (F_RL_WLUN_OK | F_SKIP_UA | F_DELAY_OVERR)
246 #define FF_MEDIA_IO (F_M_ACCESS | F_FAKE_RW)
247 #define FF_SA (F_SA_HIGH | F_SA_LOW)
248 #define F_LONG_DELAY            (F_SSU_DELAY | F_SYNC_DELAY)
249
250 #define SDEBUG_MAX_PARTS 4
251
252 #define SDEBUG_MAX_CMD_LEN 32
253
254 #define SDEB_XA_NOT_IN_USE XA_MARK_1
255
256 static struct kmem_cache *queued_cmd_cache;
257
258 #define TO_QUEUED_CMD(scmd)  ((void *)(scmd)->host_scribble)
259 #define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }
260
261 /* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
	ZBC_ZTYPE_CNV	= 0x1,	/* conventional: random writes permitted */
	ZBC_ZTYPE_SWR	= 0x2,	/* sequential write required */
	ZBC_ZTYPE_SWP	= 0x3,	/* sequential write preferred */
	/* ZBC_ZTYPE_SOBR = 0x4, */
	ZBC_ZTYPE_GAP	= 0x5,	/* gap zone between other zone types */
};
269
270 /* enumeration names taken from table 26, zbcr05 */
/*
 * Zone conditions; numeric values are the ZONE CONDITION codes from
 * table 26 of zbcr05, hence ZC6 (0xd) sorting before ZC5 (0xe).
 */
enum sdebug_z_cond {
	ZBC_NOT_WRITE_POINTER	= 0x0,
	ZC1_EMPTY		= 0x1,
	ZC2_IMPLICIT_OPEN	= 0x2,
	ZC3_EXPLICIT_OPEN	= 0x3,
	ZC4_CLOSED		= 0x4,
	ZC6_READ_ONLY		= 0xd,
	ZC5_FULL		= 0xe,
	ZC7_OFFLINE		= 0xf,
};
281
struct sdeb_zone_state {	/* ZBC: per zone state */
	enum sdebug_z_type z_type;
	enum sdebug_z_cond z_cond;
	bool z_non_seq_resource;	/* non-sequential-write resource in use */
	unsigned int z_size;		/* zone size (presumably in logical blocks) */
	sector_t z_start;		/* first sector of this zone */
	sector_t z_wp;			/* current write pointer position */
};
290
/* Types of injectable errors; see struct sdebug_err_inject below */
enum sdebug_err_type {
	ERR_TMOUT_CMD		= 0,	/* make specific scsi command timeout */
	ERR_FAIL_QUEUE_CMD	= 1,	/* make specific scsi command's */
					/* queuecmd return failed */
	ERR_FAIL_CMD		= 2,	/* make specific scsi command's */
					/* queuecmd return succeed but */
					/* with errors set in scsi_cmnd */
	ERR_ABORT_CMD_FAILED	= 3,	/* control return FAILED from */
					/* scsi_debug_abort() */
	ERR_LUN_RESET_FAILED	= 4,	/* control return FAILED from */
					/* scsi_debug_device_reset() */
};
303
/* One injected error rule; linked into a device's inject_err_list */
struct sdebug_err_inject {
	int type;			/* an enum sdebug_err_type value */
	struct list_head list;		/* node in sdebug_dev_info.inject_err_list */
	int cnt;			/* remaining injection count (see users) */
	unsigned char cmd;		/* SCSI opcode this rule applies to */
	struct rcu_head rcu;		/* for deferred freeing of this entry */

	union {
		/*
		 * For ERR_FAIL_QUEUE_CMD
		 */
		int queuecmd_ret;

		/*
		 * For ERR_FAIL_CMD
		 */
		struct {
			unsigned char host_byte;
			unsigned char driver_byte;
			unsigned char status_byte;
			unsigned char sense_key;
			unsigned char asc;
			unsigned char asq;
		};
	};
};
330
/* Per simulated logical unit (device) state */
struct sdebug_dev_info {
	struct list_head dev_list;	/* node in sdbg_host->dev_info_list */
	unsigned int channel;
	unsigned int target;
	u64 lun;
	uuid_t lu_name;			/* logical unit name (UUID designator) */
	struct sdebug_host_info *sdbg_host;	/* owning simulated host */
	unsigned long uas_bm[1];	/* pending Unit Attentions, bits are SDEBUG_UA_* */
	atomic_t stopped;	/* 1: by SSU, 2: device start */
	bool used;

	/* For ZBC devices */
	bool zoned;
	unsigned int zcap;		/* zone capacity; presumably <= zsize */
	unsigned int zsize;		/* zone size */
	unsigned int zsize_shift;	/* log2(zsize) style shift for LBA->zone */
	unsigned int nr_zones;
	unsigned int nr_conv_zones;	/* conventional zones */
	unsigned int nr_seq_zones;	/* sequential write zones */
	unsigned int nr_imp_open;	/* implicitly open zones */
	unsigned int nr_exp_open;	/* explicitly open zones */
	unsigned int nr_closed;
	unsigned int max_open;		/* max concurrently open zones */
	ktime_t create_ts;	/* time since bootup that this device was created */
	struct sdeb_zone_state *zstate;	/* array of nr_zones zone states */

	struct dentry *debugfs_entry;
	struct spinlock list_lock;	/* protects inject_err_list */
	struct list_head inject_err_list;	/* of struct sdebug_err_inject */
};
361
/* Per simulated target state */
struct sdebug_target_info {
	bool reset_fail;	/* when set, make target reset report failure */
	struct dentry *debugfs_entry;
};
366
/* Per simulated host adapter state */
struct sdebug_host_info {
	struct list_head host_list;	/* node in the global host list */
	int si_idx;	/* sdeb_store_info (per host) xarray index */
	struct Scsi_Host *shost;
	struct device dev;
	struct list_head dev_info_list;	/* of struct sdebug_dev_info */
};
374
375 /* There is an xarray of pointers to this struct's objects, one per host */
/* There is an xarray of pointers to this struct's objects, one per host */
struct sdeb_store_info {
	rwlock_t macc_lck;	/* for atomic media access on this store */
	u8 *storep;		/* user data storage (ram) */
	struct t10_pi_tuple *dif_storep; /* protection info */
	void *map_storep;	/* provisioning map */
};
382
383 #define dev_to_sdebug_host(d)   \
384         container_of(d, struct sdebug_host_info, dev)
385
386 #define shost_to_sdebug_host(shost)     \
387         dev_to_sdebug_host(shost->dma_dev)
388
/* How a command's completion is being deferred: not at all, via an
 * hrtimer, via a workqueue item, or waiting for mq_poll to reap it. */
enum sdeb_defer_type {SDEB_DEFER_NONE = 0, SDEB_DEFER_HRT = 1,
		      SDEB_DEFER_WQ = 2, SDEB_DEFER_POLL = 3};
391
/* Deferred-completion bookkeeping for one queued command */
struct sdebug_defer {
	struct hrtimer hrt;		/* used when defer_t == SDEB_DEFER_HRT */
	struct execute_work ew;		/* used when defer_t == SDEB_DEFER_WQ */
	ktime_t cmpl_ts;/* time since boot to complete this cmd */
	int issuing_cpu;		/* cpu the command was submitted on */
	bool aborted;	/* true when blk_abort_request() already called */
	enum sdeb_defer_type defer_t;
};
400
struct sdebug_queued_cmd {
	/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
	 * instance indicates this slot is in use.
	 * NOTE(review): sdebug_queue/in_use_bm is not visible in this part of
	 * the file; confirm the above comment is still accurate.
	 */
	struct sdebug_defer sd_dp;	/* deferred completion state */
	struct scsi_cmnd *scmd;		/* the mid-level command being serviced */
};
408
/* Per-command private data (presumably scsi_cmnd driver data) */
struct sdebug_scsi_cmd {
	spinlock_t   lock;
};
412
413 static atomic_t sdebug_cmnd_count;   /* number of incoming commands */
414 static atomic_t sdebug_completions;  /* count of deferred completions */
415 static atomic_t sdebug_miss_cpus;    /* submission + completion cpus differ */
416 static atomic_t sdebug_a_tsf;        /* 'almost task set full' counter */
417 static atomic_t sdeb_inject_pending;
418 static atomic_t sdeb_mq_poll_count;  /* bumped when mq_poll returns > 0 */
419
/* Describes one supported SCSI opcode (or service action variant) */
struct opcode_info_t {
	u8 num_attached;	/* 0 if this is it (i.e. a leaf); use 0xff */
				/* for terminating element */
	u8 opcode;		/* if num_attached > 0, preferred */
	u16 sa;			/* service action */
	u32 flags;		/* OR-ed set of SDEB_F_* */
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *); /* responder */
	const struct opcode_info_t *arrp;  /* num_attached elements or NULL */
	u8 len_mask[16];	/* len_mask[0]-->cdb_len, then mask for cdb */
				/* 1 to min(cdb_len, 15); ignore cdb[15...] */
};
431
432 /* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes */
/* SCSI opcodes (first byte of cdb) of interest mapped onto these indexes.
 * Values index opcode_info_arr[]; opcode_ind_arr[] below maps each of the
 * 256 possible cdb opcodes to one of these. */
enum sdeb_opcode_index {
	SDEB_I_INVALID_OPCODE = 0,
	SDEB_I_INQUIRY = 1,
	SDEB_I_REPORT_LUNS = 2,
	SDEB_I_REQUEST_SENSE = 3,
	SDEB_I_TEST_UNIT_READY = 4,
	SDEB_I_MODE_SENSE = 5,		/* 6, 10 */
	SDEB_I_MODE_SELECT = 6,		/* 6, 10 */
	SDEB_I_LOG_SENSE = 7,
	SDEB_I_READ_CAPACITY = 8,	/* 10; 16 is in SA_IN(16) */
	SDEB_I_READ = 9,		/* 6, 10, 12, 16 */
	SDEB_I_WRITE = 10,		/* 6, 10, 12, 16 */
	SDEB_I_START_STOP = 11,
	SDEB_I_SERV_ACT_IN_16 = 12,	/* add ...SERV_ACT_IN_12 if needed */
	SDEB_I_SERV_ACT_OUT_16 = 13,	/* add ...SERV_ACT_OUT_12 if needed */
	SDEB_I_MAINT_IN = 14,
	SDEB_I_MAINT_OUT = 15,
	SDEB_I_VERIFY = 16,		/* VERIFY(10), VERIFY(16) */
	SDEB_I_VARIABLE_LEN = 17,	/* READ(32), WRITE(32), WR_SCAT(32) */
	SDEB_I_RESERVE = 18,		/* 6, 10 */
	SDEB_I_RELEASE = 19,		/* 6, 10 */
	SDEB_I_ALLOW_REMOVAL = 20,	/* PREVENT ALLOW MEDIUM REMOVAL */
	SDEB_I_REZERO_UNIT = 21,	/* REWIND in SSC */
	SDEB_I_ATA_PT = 22,		/* 12, 16 */
	SDEB_I_SEND_DIAG = 23,
	SDEB_I_UNMAP = 24,
	SDEB_I_WRITE_BUFFER = 25,
	SDEB_I_WRITE_SAME = 26,		/* 10, 16 */
	SDEB_I_SYNC_CACHE = 27,		/* 10, 16 */
	SDEB_I_COMP_WRITE = 28,
	SDEB_I_PRE_FETCH = 29,		/* 10, 16 */
	SDEB_I_ZONE_OUT = 30,		/* 0x94+SA; includes no data xfer */
	SDEB_I_ZONE_IN = 31,		/* 0x95+SA; all have data-in */
	SDEB_I_LAST_ELEM_P1 = 32,	/* keep this last (previous + 1) */
};
468
469
/* Maps each possible cdb opcode byte (array index) to an SDEB_I_* index
 * into opcode_info_arr[]; 0 (SDEB_I_INVALID_OPCODE) means unsupported. */
static const unsigned char opcode_ind_arr[256] = {
/* 0x0; 0x0->0x1f: 6 byte cdbs */
	SDEB_I_TEST_UNIT_READY, SDEB_I_REZERO_UNIT, 0, SDEB_I_REQUEST_SENSE,
	    0, 0, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, 0,
	0, 0, SDEB_I_INQUIRY, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, SDEB_I_START_STOP, 0, SDEB_I_SEND_DIAG,
	    SDEB_I_ALLOW_REMOVAL, 0,
/* 0x20; 0x20->0x3f: 10 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_READ_CAPACITY, 0, 0,
	SDEB_I_READ, 0, SDEB_I_WRITE, 0, 0, 0, 0, SDEB_I_VERIFY,
	0, 0, 0, 0, SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, 0,
	0, 0, 0, SDEB_I_WRITE_BUFFER, 0, 0, 0, 0,
/* 0x40; 0x40->0x5f: 10 byte cdbs */
	0, SDEB_I_WRITE_SAME, SDEB_I_UNMAP, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_LOG_SENSE, 0, 0,
	0, 0, 0, 0, 0, SDEB_I_MODE_SELECT, SDEB_I_RESERVE,
	    SDEB_I_RELEASE,
	0, 0, SDEB_I_MODE_SENSE, 0, 0, 0, 0, 0,
/* 0x60; 0x60->0x7d are reserved, 0x7e is "extended cdb" */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, SDEB_I_VARIABLE_LEN,
/* 0x80; 0x80->0x9f: 16 byte cdbs */
	0, 0, 0, 0, 0, SDEB_I_ATA_PT, 0, 0,
	SDEB_I_READ, SDEB_I_COMP_WRITE, SDEB_I_WRITE, 0,
	0, 0, 0, SDEB_I_VERIFY,
	SDEB_I_PRE_FETCH, SDEB_I_SYNC_CACHE, 0, SDEB_I_WRITE_SAME,
	SDEB_I_ZONE_OUT, SDEB_I_ZONE_IN, 0, 0,
	0, 0, 0, 0, 0, 0, SDEB_I_SERV_ACT_IN_16, SDEB_I_SERV_ACT_OUT_16,
/* 0xa0; 0xa0->0xbf: 12 byte cdbs */
	SDEB_I_REPORT_LUNS, SDEB_I_ATA_PT, 0, SDEB_I_MAINT_IN,
	     SDEB_I_MAINT_OUT, 0, 0, 0,
	SDEB_I_READ, 0 /* SDEB_I_SERV_ACT_OUT_12 */, SDEB_I_WRITE,
	     0 /* SDEB_I_SERV_ACT_IN_12 */, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
/* 0xc0; 0xc0->0xff: vendor specific */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
514
515 /*
516  * The following "response" functions return the SCSI mid-level's 4 byte
517  * tuple-in-an-int. To handle commands with an IMMED bit, for a faster
518  * command completion, they can mask their return value with
519  * SDEG_RES_IMMED_MASK .
520  */
521 #define SDEG_RES_IMMED_MASK 0x40000000
522
523 static int resp_inquiry(struct scsi_cmnd *, struct sdebug_dev_info *);
524 static int resp_report_luns(struct scsi_cmnd *, struct sdebug_dev_info *);
525 static int resp_requests(struct scsi_cmnd *, struct sdebug_dev_info *);
526 static int resp_mode_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
527 static int resp_mode_select(struct scsi_cmnd *, struct sdebug_dev_info *);
528 static int resp_log_sense(struct scsi_cmnd *, struct sdebug_dev_info *);
529 static int resp_readcap(struct scsi_cmnd *, struct sdebug_dev_info *);
530 static int resp_read_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
531 static int resp_write_dt0(struct scsi_cmnd *, struct sdebug_dev_info *);
532 static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
533 static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
534 static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
535 static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
536 static int resp_get_stream_status(struct scsi_cmnd *scp,
537                                   struct sdebug_dev_info *devip);
538 static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
539 static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
540 static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
541 static int resp_rsup_tmfs(struct scsi_cmnd *, struct sdebug_dev_info *);
542 static int resp_verify(struct scsi_cmnd *, struct sdebug_dev_info *);
543 static int resp_write_same_10(struct scsi_cmnd *, struct sdebug_dev_info *);
544 static int resp_write_same_16(struct scsi_cmnd *, struct sdebug_dev_info *);
545 static int resp_comp_write(struct scsi_cmnd *, struct sdebug_dev_info *);
546 static int resp_write_buffer(struct scsi_cmnd *, struct sdebug_dev_info *);
547 static int resp_sync_cache(struct scsi_cmnd *, struct sdebug_dev_info *);
548 static int resp_pre_fetch(struct scsi_cmnd *, struct sdebug_dev_info *);
549 static int resp_report_zones(struct scsi_cmnd *, struct sdebug_dev_info *);
550 static int resp_open_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
551 static int resp_close_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
552 static int resp_finish_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
553 static int resp_rwp_zone(struct scsi_cmnd *, struct sdebug_dev_info *);
554
555 static int sdebug_do_add_host(bool mk_new_store);
556 static int sdebug_add_host_helper(int per_host_idx);
557 static void sdebug_do_remove_host(bool the_end);
558 static int sdebug_add_store(void);
559 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
560 static void sdebug_erase_all_stores(bool apart_from_first);
561
562 static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);
563
564 /*
565  * The following are overflow arrays for cdbs that "hit" the same index in
566  * the opcode_info_arr array. The most time sensitive (or commonly used) cdb
567  * should be placed in opcode_info_arr[], the others should be placed here.
568  */
static const struct opcode_info_t msense_iarr[] = {
	{0, 0x1a, 0, F_D_IN, NULL, NULL,		/* MODE SENSE(6) */
	    {6,  0xe8, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
573
static const struct opcode_info_t mselect_iarr[] = {
	{0, 0x15, 0, F_D_OUT, NULL, NULL,		/* MODE SELECT(6) */
	    {6,  0xf1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
578
static const struct opcode_info_t read_iarr[] = {
	{0, 0x28, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(10) */
	    {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
	     0, 0, 0, 0} },
	{0, 0x8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL, /* READ(6) */
	    {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
	{0, 0xa8, 0, F_D_IN | FF_MEDIA_IO, resp_read_dt0, NULL,/* READ(12) */
	    {12,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf,
	     0xc7, 0, 0, 0, 0} },
};
589
static const struct opcode_info_t write_iarr[] = {
	{0, 0x2a, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(10) */
	    NULL, {10,  0xfb, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
	{0, 0xa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,   /* WRITE(6) */
	    NULL, {6,  0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0,
		   0, 0, 0} },
	{0, 0xaa, 0, F_D_OUT | FF_MEDIA_IO, resp_write_dt0,  /* WRITE(12) */
	    NULL, {12,  0xfb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
		   0xbf, 0xc7, 0, 0, 0, 0} },
};
601
static const struct opcode_info_t verify_iarr[] = {
	{0, 0x2f, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,/* VERIFY(10) */
	    NULL, {10,  0xf7, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xff, 0xff, 0xc7,
		   0, 0, 0, 0, 0, 0} },
};
607
/* Overflow entries for SERVICE ACTION IN(16), opcode 0x9e; the preferred
 * service action (READ CAPACITY(16)) lives in opcode_info_arr[]. */
static const struct opcode_info_t sa_in_16_iarr[] = {
	{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
	    {16,  0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0, 0xc7} },	/* GET LBA STATUS(16) */
	{0, 0x9e, 0x16, F_SA_LOW | F_D_IN, resp_get_stream_status, NULL,
	    {16, 0x16, 0, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff,
	     0, 0} },	/* GET STREAM STATUS */
};
616
static const struct opcode_info_t vl_iarr[] = {	/* VARIABLE LENGTH */
	{0, 0x7f, 0xb, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_dt0,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0xb, 0xfa,
		   0, 0xff, 0xff, 0xff, 0xff} },	/* WRITE(32) */
	{0, 0x7f, 0x11, F_SA_HIGH | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
	    NULL, {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x11, 0xf8,
		   0, 0xff, 0xff, 0x0, 0x0} },	/* WRITE SCATTERED(32) */
};
625
static const struct opcode_info_t maint_in_iarr[] = {	/* MAINT IN */
	{0, 0xa3, 0xc, F_SA_LOW | F_D_IN, resp_rsup_opcodes, NULL,
	    {12,  0xc, 0x87, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0,
	     0xc7, 0, 0, 0, 0} }, /* REPORT SUPPORTED OPERATION CODES */
	{0, 0xa3, 0xd, F_SA_LOW | F_D_IN, resp_rsup_tmfs, NULL,
	    {12,  0xd, 0x80, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
	     0, 0} },	/* REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS */
};
634
static const struct opcode_info_t write_same_iarr[] = {
	{0, 0x93, 0, F_D_OUT_MAYBE | FF_MEDIA_IO, resp_write_same_16, NULL,
	    {16,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0x3f, 0xc7} },		/* WRITE SAME(16) */
};
640
static const struct opcode_info_t reserve_iarr[] = {
	{0, 0x16, 0, F_D_OUT, NULL, NULL,		/* RESERVE(6) */
	    {6,  0x1f, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
645
static const struct opcode_info_t release_iarr[] = {
	{0, 0x17, 0, F_D_OUT, NULL, NULL,		/* RELEASE(6) */
	    {6,  0x1f, 0xff, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
650
static const struct opcode_info_t sync_cache_iarr[] = {
	{0, 0x91, 0, F_SYNC_DELAY | F_M_ACCESS, resp_sync_cache, NULL,
	    {16,  0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* SYNC_CACHE (16) */
};
656
static const struct opcode_info_t pre_fetch_iarr[] = {
	{0, 0x90, 0, F_SYNC_DELAY | FF_MEDIA_IO, resp_pre_fetch, NULL,
	    {16,  0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },	/* PRE-FETCH (16) */
};
662
static const struct opcode_info_t zone_out_iarr[] = {	/* ZONE OUT(16) */
	{0, 0x94, 0x1, F_SA_LOW | F_M_ACCESS, resp_close_zone, NULL,
	    {16, 0x1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* CLOSE ZONE */
	{0, 0x94, 0x2, F_SA_LOW | F_M_ACCESS, resp_finish_zone, NULL,
	    {16, 0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },	/* FINISH ZONE */
	{0, 0x94, 0x4, F_SA_LOW | F_M_ACCESS, resp_rwp_zone, NULL,
	    {16, 0x4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0, 0, 0xff, 0xff, 0x1, 0xc7} },  /* RESET WRITE POINTER */
};
674
static const struct opcode_info_t zone_in_iarr[] = {	/* ZONE IN(16) */
	{0, 0x95, 0x6, F_SA_LOW | F_D_IN | F_M_ACCESS, NULL, NULL,
	    {16, 0x6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	     0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} }, /* REPORT ZONES */
};
680
681
/* This array is accessed via SDEB_I_* values. Make sure all are mapped,
 * plus the terminating elements for logic that scans this table such as
 * REPORT SUPPORTED OPERATION CODES. */
static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
/* 0 */
        {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL,    /* unknown opcodes */
            {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
        {0, 0x12, 0, FF_RESPOND | F_D_IN, resp_inquiry, NULL, /* INQUIRY */
            {6,  0xe3, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
        {0, 0xa0, 0, FF_RESPOND | F_D_IN, resp_report_luns, NULL,
            {12,  0xe3, 0xff, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 0, 0xc7, 0, 0,
             0, 0} },                                   /* REPORT LUNS */
        {0, 0x3, 0, FF_RESPOND | F_D_IN, resp_requests, NULL, /* REQUEST SENSE */
            {6,  0xe1, 0, 0, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
        {0, 0x0, 0, F_M_ACCESS | F_RL_WLUN_OK, NULL, NULL,/* TEST UNIT READY */
            {6,  0, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
/* 5 */
        {ARRAY_SIZE(msense_iarr), 0x5a, 0, F_D_IN,      /* MODE SENSE(10) */
            resp_mode_sense, msense_iarr, {10,  0xf8, 0xff, 0xff, 0, 0, 0,
                0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
        {ARRAY_SIZE(mselect_iarr), 0x55, 0, F_D_OUT,    /* MODE SELECT(10) */
            resp_mode_select, mselect_iarr, {10,  0xf1, 0, 0, 0, 0, 0, 0xff,
                0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
        {0, 0x4d, 0, F_D_IN, resp_log_sense, NULL,      /* LOG SENSE */
            {10,  0xe3, 0xff, 0xff, 0, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0, 0,
             0, 0, 0} },
        {0, 0x25, 0, F_D_IN, resp_readcap, NULL,    /* READ CAPACITY(10) */
            {10,  0xe1, 0xff, 0xff, 0xff, 0xff, 0, 0, 0x1, 0xc7, 0, 0, 0, 0,
             0, 0} },
        {ARRAY_SIZE(read_iarr), 0x88, 0, F_D_IN | FF_MEDIA_IO, /* READ(16) */
            resp_read_dt0, read_iarr, {16,  0xfe, 0xff, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
/* 10 */
        {ARRAY_SIZE(write_iarr), 0x8a, 0, F_D_OUT | FF_MEDIA_IO,
            resp_write_dt0, write_iarr,                 /* WRITE(16) */
                {16,  0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7} },
        {0, 0x1b, 0, F_SSU_DELAY, resp_start_stop, NULL,/* START STOP UNIT */
            {6,  0x1, 0, 0xf, 0xf7, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
        {ARRAY_SIZE(sa_in_16_iarr), 0x9e, 0x10, F_SA_LOW | F_D_IN,
            resp_readcap16, sa_in_16_iarr, /* SA_IN(16), READ CAPACITY(16) */
                {16,  0x10, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                 0xff, 0xff, 0xff, 0xff, 0x1, 0xc7} },
        {0, 0x9f, 0x12, F_SA_LOW | F_D_OUT | FF_MEDIA_IO, resp_write_scat,
            NULL, {16,  0x12, 0xf9, 0x0, 0xff, 0xff, 0, 0, 0xff, 0xff, 0xff,
            0xff, 0xff, 0xff, 0xff, 0xc7} },  /* SA_OUT(16), WRITE SCAT(16) */
        {ARRAY_SIZE(maint_in_iarr), 0xa3, 0xa, F_SA_LOW | F_D_IN,
            resp_report_tgtpgs, /* MAINT IN, REPORT TARGET PORT GROUPS */
                maint_in_iarr, {12,  0xea, 0, 0, 0, 0, 0xff, 0xff, 0xff,
                                0xff, 0, 0xc7, 0, 0, 0, 0} },
/* 15 */
        {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* MAINT OUT */
            {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
        {ARRAY_SIZE(verify_iarr), 0x8f, 0,
            F_D_OUT_MAYBE | FF_MEDIA_IO, resp_verify,   /* VERIFY(16) */
            verify_iarr, {16,  0xf6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                          0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xc7} },
        {ARRAY_SIZE(vl_iarr), 0x7f, 0x9, F_SA_HIGH | F_D_IN | FF_MEDIA_IO,
            resp_read_dt0, vl_iarr,     /* VARIABLE LENGTH, READ(32) */
            {32,  0xc7, 0, 0, 0, 0, 0x3f, 0x18, 0x0, 0x9, 0xfe, 0, 0xff, 0xff,
             0xff, 0xff} },
        {ARRAY_SIZE(reserve_iarr), 0x56, 0, F_D_OUT,
            NULL, reserve_iarr, /* RESERVE(10) <no response function> */
            {10,  0xff, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
             0} },
        {ARRAY_SIZE(release_iarr), 0x57, 0, F_D_OUT,
            NULL, release_iarr, /* RELEASE(10) <no response function> */
            {10,  0x13, 0xff, 0xff, 0, 0, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0,
             0} },
/* 20 */
        {0, 0x1e, 0, 0, NULL, NULL, /* ALLOW REMOVAL */
            {6,  0, 0, 0, 0x3, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
        {0, 0x1, 0, 0, resp_start_stop, NULL, /* REWIND ?? */
            {6,  0x1, 0, 0, 0, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
        {0, 0, 0, F_INV_OP | FF_RESPOND, NULL, NULL, /* ATA_PT */
            {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
        /*
         * NOTE(review): initializer order elsewhere in this table is
         * {num_attached, opcode, sa, flags, ...}; here F_D_OUT sits in the
         * service-action slot with 0 in the flags slot, which looks
         * transposed.  TODO: confirm against struct opcode_info_t layout
         * before changing — behavior kept as-is.
         */
        {0, 0x1d, F_D_OUT, 0, NULL, NULL,       /* SEND DIAGNOSTIC */
            {6,  0xf7, 0, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
        {0, 0x42, 0, F_D_OUT | FF_MEDIA_IO, resp_unmap, NULL, /* UNMAP */
            {10,  0x1, 0, 0, 0, 0, 0x3f, 0xff, 0xff, 0xc7, 0, 0, 0, 0, 0, 0} },
/* 25 */
        {0, 0x3b, 0, F_D_OUT_MAYBE, resp_write_buffer, NULL,
            {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc7, 0, 0,
             0, 0, 0, 0} },                     /* WRITE_BUFFER */
        {ARRAY_SIZE(write_same_iarr), 0x41, 0, F_D_OUT_MAYBE | FF_MEDIA_IO,
            resp_write_same_10, write_same_iarr,        /* WRITE SAME(10) */
                {10,  0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0,
                 0, 0, 0, 0, 0} },
        {ARRAY_SIZE(sync_cache_iarr), 0x35, 0, F_SYNC_DELAY | F_M_ACCESS,
            resp_sync_cache, sync_cache_iarr,
            {10,  0x7, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
             0, 0, 0, 0} },                     /* SYNC_CACHE (10) */
        {0, 0x89, 0, F_D_OUT | FF_MEDIA_IO, resp_comp_write, NULL,
            {16,  0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0, 0,
             0, 0xff, 0x3f, 0xc7} },            /* COMPARE AND WRITE */
        {ARRAY_SIZE(pre_fetch_iarr), 0x34, 0, F_SYNC_DELAY | FF_MEDIA_IO,
            resp_pre_fetch, pre_fetch_iarr,
            {10,  0x2, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, 0xc7, 0, 0,
             0, 0, 0, 0} },                     /* PRE-FETCH (10) */

/* 30 */
        {ARRAY_SIZE(zone_out_iarr), 0x94, 0x3, F_SA_LOW | F_M_ACCESS,
            resp_open_zone, zone_out_iarr, /* ZONE_OUT(16), OPEN ZONE) */
                {16,  0x3 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                 0xff, 0xff, 0x0, 0x0, 0xff, 0xff, 0x1, 0xc7} },
        {ARRAY_SIZE(zone_in_iarr), 0x95, 0x0, F_SA_LOW | F_M_ACCESS,
            resp_report_zones, zone_in_iarr, /* ZONE_IN(16), REPORT ZONES) */
                {16,  0x0 /* SA */, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
                 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0xc7} },
/* sentinel */
        {0xff, 0, 0, 0, NULL, NULL,             /* terminating element */
            {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
};
795
796 static int sdebug_num_hosts;
797 static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
798 static int sdebug_ato = DEF_ATO;
799 static int sdebug_cdb_len = DEF_CDB_LEN;
800 static int sdebug_jdelay = DEF_JDELAY;  /* if > 0 then unit is jiffies */
801 static int sdebug_dev_size_mb = DEF_DEV_SIZE_PRE_INIT;
802 static int sdebug_dif = DEF_DIF;
803 static int sdebug_dix = DEF_DIX;
804 static int sdebug_dsense = DEF_D_SENSE;
805 static int sdebug_every_nth = DEF_EVERY_NTH;
806 static int sdebug_fake_rw = DEF_FAKE_RW;
807 static unsigned int sdebug_guard = DEF_GUARD;
808 static int sdebug_host_max_queue;       /* per host */
809 static int sdebug_lowest_aligned = DEF_LOWEST_ALIGNED;
810 static int sdebug_max_luns = DEF_MAX_LUNS;
811 static int sdebug_max_queue = SDEBUG_CANQUEUE;  /* per submit queue */
812 static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
813 static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
814 static int sdebug_ndelay = DEF_NDELAY;  /* if > 0 then unit is nanoseconds */
815 static int sdebug_no_lun_0 = DEF_NO_LUN_0;
816 static int sdebug_no_uld;
817 static int sdebug_num_parts = DEF_NUM_PARTS;
818 static int sdebug_num_tgts = DEF_NUM_TGTS; /* targets per host */
819 static int sdebug_opt_blks = DEF_OPT_BLKS;
820 static int sdebug_opts = DEF_OPTS;
821 static int sdebug_physblk_exp = DEF_PHYSBLK_EXP;
822 static int sdebug_opt_xferlen_exp = DEF_OPT_XFERLEN_EXP;
823 static int sdebug_ptype = DEF_PTYPE; /* SCSI peripheral device type */
824 static int sdebug_scsi_level = DEF_SCSI_LEVEL;
825 static int sdebug_sector_size = DEF_SECTOR_SIZE;
826 static int sdeb_tur_ms_to_ready = DEF_TUR_MS_TO_READY;
827 static int sdebug_virtual_gb = DEF_VIRTUAL_GB;
828 static int sdebug_vpd_use_hostno = DEF_VPD_USE_HOSTNO;
829 static unsigned int sdebug_lbpu = DEF_LBPU;
830 static unsigned int sdebug_lbpws = DEF_LBPWS;
831 static unsigned int sdebug_lbpws10 = DEF_LBPWS10;
832 static unsigned int sdebug_lbprz = DEF_LBPRZ;
833 static unsigned int sdebug_unmap_alignment = DEF_UNMAP_ALIGNMENT;
834 static unsigned int sdebug_unmap_granularity = DEF_UNMAP_GRANULARITY;
835 static unsigned int sdebug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;
836 static unsigned int sdebug_unmap_max_desc = DEF_UNMAP_MAX_DESC;
837 static unsigned int sdebug_write_same_length = DEF_WRITESAME_LENGTH;
838 static int sdebug_uuid_ctl = DEF_UUID_CTL;
839 static bool sdebug_random = DEF_RANDOM;
840 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
841 static bool sdebug_removable = DEF_REMOVABLE;
842 static bool sdebug_clustering;
843 static bool sdebug_host_lock = DEF_HOST_LOCK;
844 static bool sdebug_strict = DEF_STRICT;
845 static bool sdebug_any_injecting_opt;
846 static bool sdebug_no_rwlock;
847 static bool sdebug_verbose;
848 static bool have_dif_prot;
849 static bool write_since_sync;
850 static bool sdebug_statistics = DEF_STATISTICS;
851 static bool sdebug_wp;
852 static bool sdebug_allow_restart;
853 static enum {
854         BLK_ZONED_NONE  = 0,
855         BLK_ZONED_HA    = 1,
856         BLK_ZONED_HM    = 2,
857 } sdeb_zbc_model = BLK_ZONED_NONE;
858 static char *sdeb_zbc_model_s;
859
860 enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
861                           SAM_LUN_AM_FLAT = 0x1,
862                           SAM_LUN_AM_LOGICAL_UNIT = 0x2,
863                           SAM_LUN_AM_EXTENDED = 0x3};
864 static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
865 static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
866
867 static unsigned int sdebug_store_sectors;
868 static sector_t sdebug_capacity;        /* in sectors */
869
870 /* old BIOS stuff, kernel may get rid of them but some mode sense pages
871    may still need them */
872 static int sdebug_heads;                /* heads per disk */
873 static int sdebug_cylinders_per;        /* cylinders per surface */
874 static int sdebug_sectors_per;          /* sectors per cylinder */
875
876 static LIST_HEAD(sdebug_host_list);
877 static DEFINE_MUTEX(sdebug_host_list_mutex);
878
879 static struct xarray per_store_arr;
880 static struct xarray *per_store_ap = &per_store_arr;
881 static int sdeb_first_idx = -1;         /* invalid index ==> none created */
882 static int sdeb_most_recent_idx = -1;
883 static DEFINE_RWLOCK(sdeb_fake_rw_lck); /* need a RW lock when fake_rw=1 */
884
885 static unsigned long map_size;
886 static int num_aborts;
887 static int num_dev_resets;
888 static int num_target_resets;
889 static int num_bus_resets;
890 static int num_host_resets;
891 static int dix_writes;
892 static int dix_reads;
893 static int dif_errors;
894
895 /* ZBC global data */
896 static bool sdeb_zbc_in_use;    /* true for host-aware and host-managed disks */
897 static int sdeb_zbc_zone_cap_mb;
898 static int sdeb_zbc_zone_size_mb;
899 static int sdeb_zbc_max_open = DEF_ZBC_MAX_OPEN_ZONES;
900 static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
901
902 static int submit_queues = DEF_SUBMIT_QUEUES;  /* > 1 for multi-queue (mq) */
903 static int poll_queues; /* iouring iopoll interface.*/
904
905 static char sdebug_proc_name[] = MY_NAME;
906 static const char *my_name = MY_NAME;
907
908 static struct bus_type pseudo_lld_bus;
909
910 static struct device_driver sdebug_driverfs_driver = {
911         .name           = sdebug_proc_name,
912         .bus            = &pseudo_lld_bus,
913 };
914
915 static const int check_condition_result =
916         SAM_STAT_CHECK_CONDITION;
917
918 static const int illegal_condition_result =
919         (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
920
921 static const int device_qfull_result =
922         (DID_ABORT << 16) | SAM_STAT_TASK_SET_FULL;
923
924 static const int condition_met_result = SAM_STAT_CONDITION_MET;
925
926 static struct dentry *sdebug_debugfs_root;
927
928 static void sdebug_err_free(struct rcu_head *head)
929 {
930         struct sdebug_err_inject *inject =
931                 container_of(head, typeof(*inject), rcu);
932
933         kfree(inject);
934 }
935
936 static void sdebug_err_add(struct scsi_device *sdev, struct sdebug_err_inject *new)
937 {
938         struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
939         struct sdebug_err_inject *err;
940
941         spin_lock(&devip->list_lock);
942         list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
943                 if (err->type == new->type && err->cmd == new->cmd) {
944                         list_del_rcu(&err->list);
945                         call_rcu(&err->rcu, sdebug_err_free);
946                 }
947         }
948
949         list_add_tail_rcu(&new->list, &devip->inject_err_list);
950         spin_unlock(&devip->list_lock);
951 }
952
/*
 * Handle a "- <type> <cmd-in-hex>" debugfs write: delete the matching
 * error-injection rule from @sdev's list.
 *
 * Ownership: @buf was allocated by the caller (sdebug_error_write) but is
 * ALWAYS freed here, on every return path.
 *
 * Returns @count on success, -EINVAL if the line does not parse or no
 * matching rule exists.  NOTE(review): @count is size_t but the return
 * type is int — harmless for debugfs-sized writes, but worth confirming.
 */
static int sdebug_err_remove(struct scsi_device *sdev, const char *buf, size_t count)
{
        struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
        struct sdebug_err_inject *err;
        int type;
        unsigned char cmd;

        if (sscanf(buf, "- %d %hhx", &type, &cmd) != 2) {
                kfree(buf);
                return -EINVAL;
        }

        spin_lock(&devip->list_lock);
        list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
                if (err->type == type && err->cmd == cmd) {
                        /* Unlink under the lock; free after RCU grace period. */
                        list_del_rcu(&err->list);
                        call_rcu(&err->rcu, sdebug_err_free);
                        spin_unlock(&devip->list_lock);
                        kfree(buf);
                        return count;
                }
        }
        spin_unlock(&devip->list_lock);

        kfree(buf);
        return -EINVAL;
}
980
981 static int sdebug_error_show(struct seq_file *m, void *p)
982 {
983         struct scsi_device *sdev = (struct scsi_device *)m->private;
984         struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdev->hostdata;
985         struct sdebug_err_inject *err;
986
987         seq_puts(m, "Type\tCount\tCommand\n");
988
989         rcu_read_lock();
990         list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
991                 switch (err->type) {
992                 case ERR_TMOUT_CMD:
993                 case ERR_ABORT_CMD_FAILED:
994                 case ERR_LUN_RESET_FAILED:
995                         seq_printf(m, "%d\t%d\t0x%x\n", err->type, err->cnt,
996                                 err->cmd);
997                 break;
998
999                 case ERR_FAIL_QUEUE_CMD:
1000                         seq_printf(m, "%d\t%d\t0x%x\t0x%x\n", err->type,
1001                                 err->cnt, err->cmd, err->queuecmd_ret);
1002                 break;
1003
1004                 case ERR_FAIL_CMD:
1005                         seq_printf(m, "%d\t%d\t0x%x\t0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1006                                 err->type, err->cnt, err->cmd,
1007                                 err->host_byte, err->driver_byte,
1008                                 err->status_byte, err->sense_key,
1009                                 err->asc, err->asq);
1010                 break;
1011                 }
1012         }
1013         rcu_read_unlock();
1014
1015         return 0;
1016 }
1017
/* debugfs open: hook the seq_file show routine to this device's node. */
static int sdebug_error_open(struct inode *inode, struct file *file)
{
        return single_open(file, sdebug_error_show, inode->i_private);
}
1022
1023 static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf,
1024                 size_t count, loff_t *ppos)
1025 {
1026         char *buf;
1027         unsigned int inject_type;
1028         struct sdebug_err_inject *inject;
1029         struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private;
1030
1031         buf = kzalloc(count + 1, GFP_KERNEL);
1032         if (!buf)
1033                 return -ENOMEM;
1034
1035         if (copy_from_user(buf, ubuf, count)) {
1036                 kfree(buf);
1037                 return -EFAULT;
1038         }
1039
1040         if (buf[0] == '-')
1041                 return sdebug_err_remove(sdev, buf, count);
1042
1043         if (sscanf(buf, "%d", &inject_type) != 1) {
1044                 kfree(buf);
1045                 return -EINVAL;
1046         }
1047
1048         inject = kzalloc(sizeof(struct sdebug_err_inject), GFP_KERNEL);
1049         if (!inject) {
1050                 kfree(buf);
1051                 return -ENOMEM;
1052         }
1053
1054         switch (inject_type) {
1055         case ERR_TMOUT_CMD:
1056         case ERR_ABORT_CMD_FAILED:
1057         case ERR_LUN_RESET_FAILED:
1058                 if (sscanf(buf, "%d %d %hhx", &inject->type, &inject->cnt,
1059                            &inject->cmd) != 3)
1060                         goto out_error;
1061         break;
1062
1063         case ERR_FAIL_QUEUE_CMD:
1064                 if (sscanf(buf, "%d %d %hhx %x", &inject->type, &inject->cnt,
1065                            &inject->cmd, &inject->queuecmd_ret) != 4)
1066                         goto out_error;
1067         break;
1068
1069         case ERR_FAIL_CMD:
1070                 if (sscanf(buf, "%d %d %hhx %hhx %hhx %hhx %hhx %hhx %hhx",
1071                            &inject->type, &inject->cnt, &inject->cmd,
1072                            &inject->host_byte, &inject->driver_byte,
1073                            &inject->status_byte, &inject->sense_key,
1074                            &inject->asc, &inject->asq) != 9)
1075                         goto out_error;
1076         break;
1077
1078         default:
1079                 goto out_error;
1080         break;
1081         }
1082
1083         kfree(buf);
1084         sdebug_err_add(sdev, inject);
1085
1086         return count;
1087
1088 out_error:
1089         kfree(buf);
1090         kfree(inject);
1091         return -EINVAL;
1092 }
1093
/* debugfs "error" file: seq_file reader plus the rule-parsing writer. */
static const struct file_operations sdebug_error_fops = {
        .open   = sdebug_error_open,
        .read   = seq_read,
        .write  = sdebug_error_write,
        .release = single_release,
};
1100
1101 static int sdebug_target_reset_fail_show(struct seq_file *m, void *p)
1102 {
1103         struct scsi_target *starget = (struct scsi_target *)m->private;
1104         struct sdebug_target_info *targetip =
1105                 (struct sdebug_target_info *)starget->hostdata;
1106
1107         if (targetip)
1108                 seq_printf(m, "%c\n", targetip->reset_fail ? 'Y' : 'N');
1109
1110         return 0;
1111 }
1112
/* debugfs open: hook the reset-fail show routine to this target's node. */
static int sdebug_target_reset_fail_open(struct inode *inode, struct file *file)
{
        return single_open(file, sdebug_target_reset_fail_show, inode->i_private);
}
1117
1118 static ssize_t sdebug_target_reset_fail_write(struct file *file,
1119                 const char __user *ubuf, size_t count, loff_t *ppos)
1120 {
1121         int ret;
1122         struct scsi_target *starget =
1123                 (struct scsi_target *)file->f_inode->i_private;
1124         struct sdebug_target_info *targetip =
1125                 (struct sdebug_target_info *)starget->hostdata;
1126
1127         if (targetip) {
1128                 ret = kstrtobool_from_user(ubuf, count, &targetip->reset_fail);
1129                 return ret < 0 ? ret : count;
1130         }
1131         return -ENODEV;
1132 }
1133
/* debugfs "fail_reset" file for a target (see sdebug_target_alloc()). */
static const struct file_operations sdebug_target_reset_fail_fops = {
        .open   = sdebug_target_reset_fail_open,
        .read   = seq_read,
        .write  = sdebug_target_reset_fail_write,
        .release = single_release,
};
1140
1141 static int sdebug_target_alloc(struct scsi_target *starget)
1142 {
1143         struct sdebug_target_info *targetip;
1144
1145         targetip = kzalloc(sizeof(struct sdebug_target_info), GFP_KERNEL);
1146         if (!targetip)
1147                 return -ENOMEM;
1148
1149         targetip->debugfs_entry = debugfs_create_dir(dev_name(&starget->dev),
1150                                 sdebug_debugfs_root);
1151
1152         debugfs_create_file("fail_reset", 0600, targetip->debugfs_entry, starget,
1153                                 &sdebug_target_reset_fail_fops);
1154
1155         starget->hostdata = targetip;
1156
1157         return 0;
1158 }
1159
/*
 * Async teardown of per-target state: remove the debugfs directory and
 * free the allocation (scheduled from sdebug_target_destroy()).
 * NOTE(review): "tartget" is a typo for "target"; kept as-is since the
 * name is referenced by the caller below.
 */
static void sdebug_tartget_cleanup_async(void *data, async_cookie_t cookie)
{
        struct sdebug_target_info *targetip = data;

        debugfs_remove(targetip->debugfs_entry);
        kfree(targetip);
}
1167
1168 static void sdebug_target_destroy(struct scsi_target *starget)
1169 {
1170         struct sdebug_target_info *targetip;
1171
1172         targetip = (struct sdebug_target_info *)starget->hostdata;
1173         if (targetip) {
1174                 starget->hostdata = NULL;
1175                 async_schedule(sdebug_tartget_cleanup_async, targetip);
1176         }
1177 }
1178
1179 /* Only do the extra work involved in logical block provisioning if one or
1180  * more of the lbpu, lbpws or lbpws10 parameters are given and we are doing
1181  * real reads and writes (i.e. not skipping them for speed).
1182  */
1183 static inline bool scsi_debug_lbp(void)
1184 {
1185         return 0 == sdebug_fake_rw &&
1186                 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
1187 }
1188
/*
 * Map an LBA to its byte address inside the (fake) backing store.  The
 * LBA is reduced modulo sdebug_store_sectors (do_div() divides in place
 * and returns the remainder), so addressing wraps when the simulated
 * capacity exceeds the store.  If @sip is missing or has no data store,
 * warn once and fall back to store 0 in per_store_ap.
 */
static void *lba2fake_store(struct sdeb_store_info *sip,
                            unsigned long long lba)
{
        struct sdeb_store_info *lsip = sip;

        lba = do_div(lba, sdebug_store_sectors);        /* lba %= store sectors */
        if (!sip || !sip->storep) {
                WARN_ON_ONCE(true);
                lsip = xa_load(per_store_ap, 0);  /* should never be NULL */
        }
        return lsip->storep + lba * sdebug_sector_size;
}
1201
/*
 * Return the T10 protection-information tuple for @sector; the sector is
 * first reduced modulo sdebug_store_sectors (sector_div() divides in
 * place and returns the remainder) so the PI store wraps with the data.
 */
static struct t10_pi_tuple *dif_store(struct sdeb_store_info *sip,
                                      sector_t sector)
{
        sector = sector_div(sector, sdebug_store_sectors);

        return sip->dif_storep + sector;
}
1209
1210 static void sdebug_max_tgts_luns(void)
1211 {
1212         struct sdebug_host_info *sdbg_host;
1213         struct Scsi_Host *hpnt;
1214
1215         mutex_lock(&sdebug_host_list_mutex);
1216         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1217                 hpnt = sdbg_host->shost;
1218                 if ((hpnt->this_id >= 0) &&
1219                     (sdebug_num_tgts > hpnt->this_id))
1220                         hpnt->max_id = sdebug_num_tgts + 1;
1221                 else
1222                         hpnt->max_id = sdebug_num_tgts;
1223                 /* sdebug_max_luns; */
1224                 hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
1225         }
1226         mutex_unlock(&sdebug_host_list_mutex);
1227 }
1228
/* Whether the invalid field lives in the data-out buffer or in the CDB. */
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};

/* Set in_bit to -1 to indicate no bit position of invalid field */
/*
 * Build an ILLEGAL REQUEST sense (ASC chosen by @c_d) carrying a
 * SENSE-KEY SPECIFIC "field pointer" identifying the offending byte
 * (@in_byte) and optionally bit (@in_bit) of the CDB or parameter list.
 */
static void mk_sense_invalid_fld(struct scsi_cmnd *scp,
                                 enum sdeb_cmd_data c_d,
                                 int in_byte, int in_bit)
{
        unsigned char *sbuff;
        u8 sks[4];      /* sense-key specific bytes; only 3 are emitted */
        int sl, asc;

        sbuff = scp->sense_buffer;
        if (!sbuff) {
                sdev_printk(KERN_ERR, scp->device,
                            "%s: sense_buffer is NULL\n", __func__);
                return;
        }
        asc = c_d ? INVALID_FIELD_IN_CDB : INVALID_FIELD_IN_PARAM_LIST;
        memset(sbuff, 0, SCSI_SENSE_BUFFERSIZE);
        scsi_build_sense(scp, sdebug_dsense, ILLEGAL_REQUEST, asc, 0);
        memset(sks, 0, sizeof(sks));
        sks[0] = 0x80;                  /* SKSV: field pointer valid */
        if (c_d)
                sks[0] |= 0x40;         /* C/D: error is in the CDB */
        if (in_bit >= 0) {
                sks[0] |= 0x8;          /* BPV: bit pointer valid */
                sks[0] |= 0x7 & in_bit;
        }
        /* bytes 1..2: big-endian byte offset of the bad field */
        put_unaligned_be16(in_byte, sks + 1);
        if (sdebug_dsense) {
                /* descriptor format: append an 8-byte SKS descriptor
                 * (type 0x2, additional length 0x6) and grow the
                 * additional sense length in byte 7 */
                sl = sbuff[7] + 8;
                sbuff[7] = sl;
                sbuff[sl] = 0x2;
                sbuff[sl + 1] = 0x6;
                memcpy(sbuff + sl + 4, sks, 3);
        } else
                /* fixed format: SKS occupies bytes 15..17 */
                memcpy(sbuff + 15, sks, 3);
        if (sdebug_verbose)
                sdev_printk(KERN_INFO, scp->device, "%s:  [sense_key,asc,ascq"
                            "]: [0x5,0x%x,0x0] %c byte=%d, bit=%d\n",
                            my_name, asc, c_d ? 'C' : 'D', in_byte, in_bit);
}
1271
1272 static void mk_sense_buffer(struct scsi_cmnd *scp, int key, int asc, int asq)
1273 {
1274         if (!scp->sense_buffer) {
1275                 sdev_printk(KERN_ERR, scp->device,
1276                             "%s: sense_buffer is NULL\n", __func__);
1277                 return;
1278         }
1279         memset(scp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1280
1281         scsi_build_sense(scp, sdebug_dsense, key, asc, asq);
1282
1283         if (sdebug_verbose)
1284                 sdev_printk(KERN_INFO, scp->device,
1285                             "%s:  [sense_key,asc,ascq]: [0x%x,0x%x,0x%x]\n",
1286                             my_name, key, asc, asq);
1287 }
1288
/* Build an ILLEGAL REQUEST / INVALID COMMAND OPERATION CODE sense. */
static void mk_sense_invalid_opcode(struct scsi_cmnd *scp)
{
        mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_OPCODE, 0);
}
1293
1294 static int scsi_debug_ioctl(struct scsi_device *dev, unsigned int cmd,
1295                             void __user *arg)
1296 {
1297         if (sdebug_verbose) {
1298                 if (0x1261 == cmd)
1299                         sdev_printk(KERN_INFO, dev,
1300                                     "%s: BLKFLSBUF [0x1261]\n", __func__);
1301                 else if (0x5331 == cmd)
1302                         sdev_printk(KERN_INFO, dev,
1303                                     "%s: CDROM_GET_CAPABILITY [0x5331]\n",
1304                                     __func__);
1305                 else
1306                         sdev_printk(KERN_INFO, dev, "%s: cmd=0x%x\n",
1307                                     __func__, cmd);
1308         }
1309         return -EINVAL;
1310         /* return -ENOTTY; // correct return but upsets fdisk */
1311 }
1312
1313 static void config_cdb_len(struct scsi_device *sdev)
1314 {
1315         switch (sdebug_cdb_len) {
1316         case 6: /* suggest 6 byte READ, WRITE and MODE SENSE/SELECT */
1317                 sdev->use_10_for_rw = false;
1318                 sdev->use_16_for_rw = false;
1319                 sdev->use_10_for_ms = false;
1320                 break;
1321         case 10: /* suggest 10 byte RWs and 6 byte MODE SENSE/SELECT */
1322                 sdev->use_10_for_rw = true;
1323                 sdev->use_16_for_rw = false;
1324                 sdev->use_10_for_ms = false;
1325                 break;
1326         case 12: /* suggest 10 byte RWs and 10 byte MODE SENSE/SELECT */
1327                 sdev->use_10_for_rw = true;
1328                 sdev->use_16_for_rw = false;
1329                 sdev->use_10_for_ms = true;
1330                 break;
1331         case 16:
1332                 sdev->use_10_for_rw = false;
1333                 sdev->use_16_for_rw = true;
1334                 sdev->use_10_for_ms = true;
1335                 break;
1336         case 32: /* No knobs to suggest this so same as 16 for now */
1337                 sdev->use_10_for_rw = false;
1338                 sdev->use_16_for_rw = true;
1339                 sdev->use_10_for_ms = true;
1340                 break;
1341         default:
1342                 pr_warn("unexpected cdb_len=%d, force to 10\n",
1343                         sdebug_cdb_len);
1344                 sdev->use_10_for_rw = true;
1345                 sdev->use_16_for_rw = false;
1346                 sdev->use_10_for_ms = false;
1347                 sdebug_cdb_len = 10;
1348                 break;
1349         }
1350 }
1351
1352 static void all_config_cdb_len(void)
1353 {
1354         struct sdebug_host_info *sdbg_host;
1355         struct Scsi_Host *shost;
1356         struct scsi_device *sdev;
1357
1358         mutex_lock(&sdebug_host_list_mutex);
1359         list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
1360                 shost = sdbg_host->shost;
1361                 shost_for_each_device(sdev, shost) {
1362                         config_cdb_len(sdev);
1363                 }
1364         }
1365         mutex_unlock(&sdebug_host_list_mutex);
1366 }
1367
1368 static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
1369 {
1370         struct sdebug_host_info *sdhp = devip->sdbg_host;
1371         struct sdebug_dev_info *dp;
1372
1373         list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
1374                 if ((devip->sdbg_host == dp->sdbg_host) &&
1375                     (devip->target == dp->target)) {
1376                         clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
1377                 }
1378         }
1379 }
1380
1381 static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1382 {
1383         int k;
1384
1385         k = find_first_bit(devip->uas_bm, SDEBUG_NUM_UAS);
1386         if (k != SDEBUG_NUM_UAS) {
1387                 const char *cp = NULL;
1388
1389                 switch (k) {
1390                 case SDEBUG_UA_POR:
1391                         mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1392                                         POWER_ON_RESET_ASCQ);
1393                         if (sdebug_verbose)
1394                                 cp = "power on reset";
1395                         break;
1396                 case SDEBUG_UA_POOCCUR:
1397                         mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1398                                         POWER_ON_OCCURRED_ASCQ);
1399                         if (sdebug_verbose)
1400                                 cp = "power on occurred";
1401                         break;
1402                 case SDEBUG_UA_BUS_RESET:
1403                         mk_sense_buffer(scp, UNIT_ATTENTION, UA_RESET_ASC,
1404                                         BUS_RESET_ASCQ);
1405                         if (sdebug_verbose)
1406                                 cp = "bus reset";
1407                         break;
1408                 case SDEBUG_UA_MODE_CHANGED:
1409                         mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1410                                         MODE_CHANGED_ASCQ);
1411                         if (sdebug_verbose)
1412                                 cp = "mode parameters changed";
1413                         break;
1414                 case SDEBUG_UA_CAPACITY_CHANGED:
1415                         mk_sense_buffer(scp, UNIT_ATTENTION, UA_CHANGED_ASC,
1416                                         CAPACITY_CHANGED_ASCQ);
1417                         if (sdebug_verbose)
1418                                 cp = "capacity data changed";
1419                         break;
1420                 case SDEBUG_UA_MICROCODE_CHANGED:
1421                         mk_sense_buffer(scp, UNIT_ATTENTION,
1422                                         TARGET_CHANGED_ASC,
1423                                         MICROCODE_CHANGED_ASCQ);
1424                         if (sdebug_verbose)
1425                                 cp = "microcode has been changed";
1426                         break;
1427                 case SDEBUG_UA_MICROCODE_CHANGED_WO_RESET:
1428                         mk_sense_buffer(scp, UNIT_ATTENTION,
1429                                         TARGET_CHANGED_ASC,
1430                                         MICROCODE_CHANGED_WO_RESET_ASCQ);
1431                         if (sdebug_verbose)
1432                                 cp = "microcode has been changed without reset";
1433                         break;
1434                 case SDEBUG_UA_LUNS_CHANGED:
1435                         /*
1436                          * SPC-3 behavior is to report a UNIT ATTENTION with
1437                          * ASC/ASCQ REPORTED LUNS DATA HAS CHANGED on every LUN
1438                          * on the target, until a REPORT LUNS command is
1439                          * received.  SPC-4 behavior is to report it only once.
1440                          * NOTE:  sdebug_scsi_level does not use the same
1441                          * values as struct scsi_device->scsi_level.
1442                          */
1443                         if (sdebug_scsi_level >= 6)     /* SPC-4 and above */
1444                                 clear_luns_changed_on_target(devip);
1445                         mk_sense_buffer(scp, UNIT_ATTENTION,
1446                                         TARGET_CHANGED_ASC,
1447                                         LUNS_CHANGED_ASCQ);
1448                         if (sdebug_verbose)
1449                                 cp = "reported luns data has changed";
1450                         break;
1451                 default:
1452                         pr_warn("unexpected unit attention code=%d\n", k);
1453                         if (sdebug_verbose)
1454                                 cp = "unknown";
1455                         break;
1456                 }
1457                 clear_bit(k, devip->uas_bm);
1458                 if (sdebug_verbose)
1459                         sdev_printk(KERN_INFO, scp->device,
1460                                    "%s reports: Unit attention: %s\n",
1461                                    my_name, cp);
1462                 return check_condition_result;
1463         }
1464         return 0;
1465 }
1466
1467 /* Build SCSI "data-in" buffer. Returns 0 if ok else (DID_ERROR << 16). */
1468 static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1469                                 int arr_len)
1470 {
1471         int act_len;
1472         struct scsi_data_buffer *sdb = &scp->sdb;
1473
1474         if (!sdb->length)
1475                 return 0;
1476         if (scp->sc_data_direction != DMA_FROM_DEVICE)
1477                 return DID_ERROR << 16;
1478
1479         act_len = sg_copy_from_buffer(sdb->table.sgl, sdb->table.nents,
1480                                       arr, arr_len);
1481         scsi_set_resid(scp, scsi_bufflen(scp) - act_len);
1482
1483         return 0;
1484 }
1485
1486 /* Partial build of SCSI "data-in" buffer. Returns 0 if ok else
1487  * (DID_ERROR << 16). Can write to offset in data-in buffer. If multiple
1488  * calls, not required to write in ascending offset order. Assumes resid
1489  * set to scsi_bufflen() prior to any calls.
1490  */
1491 static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr,
1492                                   int arr_len, unsigned int off_dst)
1493 {
1494         unsigned int act_len, n;
1495         struct scsi_data_buffer *sdb = &scp->sdb;
1496         off_t skip = off_dst;
1497
1498         if (sdb->length <= off_dst)
1499                 return 0;
1500         if (scp->sc_data_direction != DMA_FROM_DEVICE)
1501                 return DID_ERROR << 16;
1502
1503         act_len = sg_pcopy_from_buffer(sdb->table.sgl, sdb->table.nents,
1504                                        arr, arr_len, skip);
1505         pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n",
1506                  __func__, off_dst, scsi_bufflen(scp), act_len,
1507                  scsi_get_resid(scp));
1508         n = scsi_bufflen(scp) - (off_dst + act_len);
1509         scsi_set_resid(scp, min_t(u32, scsi_get_resid(scp), n));
1510         return 0;
1511 }
1512
1513 /* Fetches from SCSI "data-out" buffer. Returns number of bytes fetched into
1514  * 'arr' or -1 if error.
1515  */
1516 static int fetch_to_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr,
1517                                int arr_len)
1518 {
1519         if (!scsi_bufflen(scp))
1520                 return 0;
1521         if (scp->sc_data_direction != DMA_TO_DEVICE)
1522                 return -1;
1523
1524         return scsi_sg_copy_to_buffer(scp, arr, arr_len);
1525 }
1526
1527
/* INQUIRY vendor (8), product (16) and revision (4) strings, space padded. */
static char sdebug_inq_vendor_id[9] = "Linux   ";
static char sdebug_inq_product_id[17] = "scsi_debug      ";
static char sdebug_inq_product_rev[5] = SDEBUG_VERSION;
/* Use some locally assigned NAAs for SAS addresses. */
static const u64 naa3_comp_a = 0x3222222000000000ULL;
static const u64 naa3_comp_b = 0x3333333000000000ULL;
static const u64 naa3_comp_c = 0x3111111000000000ULL;
1535
/* Device identification VPD page. Returns number of bytes placed in arr.
 *
 * Emits, in order: a T10 vendor-ID designator; when dev_id_num >= 0 (i.e.
 * not a wlun) a logical unit designator (locally assigned UUID or NAA-3,
 * chosen by sdebug_uuid_ctl) and a relative target port designator; then
 * NAA-3 target port, target port group and target device designators and
 * a SCSI name string for the target device.
 */
static int inquiry_vpd_83(unsigned char *arr, int port_group_id,
			  int target_dev_id, int dev_id_num,
			  const char *dev_id_str, int dev_id_str_len,
			  const uuid_t *lu_name)
{
	int num, port_a;
	char b[32];

	port_a = target_dev_id + 1;
	/* T10 vendor identifier field format (faked) */
	arr[0] = 0x2;	/* ASCII */
	arr[1] = 0x1;
	arr[2] = 0x0;
	memcpy(&arr[4], sdebug_inq_vendor_id, 8);
	memcpy(&arr[12], sdebug_inq_product_id, 16);
	memcpy(&arr[28], dev_id_str, dev_id_str_len);
	num = 8 + 16 + dev_id_str_len;
	arr[3] = num;	/* designator length */
	num += 4;	/* account for the 4 byte designator header */
	if (dev_id_num >= 0) {
		if (sdebug_uuid_ctl) {
			/* Locally assigned UUID */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0xa;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x12;
			arr[num++] = 0x10; /* uuid type=1, locally assigned */
			arr[num++] = 0x0;
			memcpy(arr + num, lu_name, 16);
			num += 16;
		} else {
			/* NAA-3, Logical unit identifier (binary) */
			arr[num++] = 0x1;  /* binary (not necessarily sas) */
			arr[num++] = 0x3;  /* PIV=0, lu, naa */
			arr[num++] = 0x0;
			arr[num++] = 0x8;
			put_unaligned_be64(naa3_comp_b + dev_id_num, arr + num);
			num += 8;
		}
		/* Target relative port number */
		arr[num++] = 0x61;	/* proto=sas, binary */
		arr[num++] = 0x94;	/* PIV=1, target port, rel port */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x4;	/* length */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;	/* reserved */
		arr[num++] = 0x0;
		arr[num++] = 0x1;	/* relative port A */
	}
	/* NAA-3, Target port identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x93;	/* piv=1, target port, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + port_a, arr + num);
	num += 8;
	/* NAA-3, Target port group identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0x95;	/* piv=1, target port group id */
	arr[num++] = 0x0;
	arr[num++] = 0x4;
	arr[num++] = 0;
	arr[num++] = 0;
	put_unaligned_be16(port_group_id, arr + num);
	num += 2;
	/* NAA-3, Target device identifier */
	arr[num++] = 0x61;	/* proto=sas, binary */
	arr[num++] = 0xa3;	/* piv=1, target device, naa */
	arr[num++] = 0x0;
	arr[num++] = 0x8;
	put_unaligned_be64(naa3_comp_a + target_dev_id, arr + num);
	num += 8;
	/* SCSI name string: Target device identifier */
	arr[num++] = 0x63;	/* proto=sas, UTF-8 */
	arr[num++] = 0xa8;	/* piv=1, target device, SCSI name string */
	arr[num++] = 0x0;
	arr[num++] = 24;	/* designator length */
	/* "naa." prefix + fixed NAA high digits + 8 hex digits of dev id */
	memcpy(arr + num, "naa.32222220", 12);
	num += 12;
	snprintf(b, sizeof(b), "%08X", target_dev_id);
	memcpy(arr + num, b, 8);
	num += 8;
	memset(arr + num, 0, 4);	/* zero pad to the declared 24 bytes */
	num += 4;
	return num;
}
1623
/* Payload (bytes 4 onward) for the Software interface identification VPD
 * page: three 6-byte identifiers differing only in the final byte.
 */
static unsigned char vpd84_data[] = {
/* from 4th byte */ 0x22,0x22,0x22,0x0,0xbb,0x0,
    0x22,0x22,0x22,0x0,0xbb,0x1,
    0x22,0x22,0x22,0x0,0xbb,0x2,
};
1629
1630 /*  Software interface identification VPD page */
1631 static int inquiry_vpd_84(unsigned char *arr)
1632 {
1633         memcpy(arr, vpd84_data, sizeof(vpd84_data));
1634         return sizeof(vpd84_data);
1635 }
1636
/*
 * Append one network services descriptor for the Management network
 * addresses VPD page: a 4 byte header (association/service type, two
 * reserved/length-MSB bytes, length) followed by the URL, NUL terminated
 * and zero padded to a multiple of 4 bytes. Returns bytes written.
 */
static int inquiry_vpd_85_descr(unsigned char *arr, unsigned char assoc_st,
				const char *url)
{
	int olen = strlen(url);
	int plen = olen + 1;		/* include NUL terminator */

	if (plen % 4)
		plen = ((plen / 4) + 1) * 4;	/* pad to 4 byte multiple */
	arr[0] = assoc_st;	/* association and service type */
	arr[1] = 0x0;		/* reserved */
	arr[2] = 0x0;		/* length (MSB) */
	arr[3] = plen;		/* length, null terminated, padded */
	memcpy(arr + 4, url, olen);
	memset(arr + 4 + olen, 0, plen - olen);
	return 4 + plen;
}

/* Management network addresses VPD page. Returns bytes placed in arr. */
static int inquiry_vpd_85(unsigned char *arr)
{
	int num = 0;

	/* lu, storage config */
	num += inquiry_vpd_85_descr(arr + num, 0x1,
				    "https://www.kernel.org/config");
	/* lu, logging */
	num += inquiry_vpd_85_descr(arr + num, 0x4,
				    "http://www.kernel.org/log");
	return num;
}
1671
1672 /* SCSI ports VPD page */
1673 static int inquiry_vpd_88(unsigned char *arr, int target_dev_id)
1674 {
1675         int num = 0;
1676         int port_a, port_b;
1677
1678         port_a = target_dev_id + 1;
1679         port_b = port_a + 1;
1680         arr[num++] = 0x0;       /* reserved */
1681         arr[num++] = 0x0;       /* reserved */
1682         arr[num++] = 0x0;
1683         arr[num++] = 0x1;       /* relative port 1 (primary) */
1684         memset(arr + num, 0, 6);
1685         num += 6;
1686         arr[num++] = 0x0;
1687         arr[num++] = 12;        /* length tp descriptor */
1688         /* naa-5 target port identifier (A) */
1689         arr[num++] = 0x61;      /* proto=sas, binary */
1690         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
1691         arr[num++] = 0x0;       /* reserved */
1692         arr[num++] = 0x8;       /* length */
1693         put_unaligned_be64(naa3_comp_a + port_a, arr + num);
1694         num += 8;
1695         arr[num++] = 0x0;       /* reserved */
1696         arr[num++] = 0x0;       /* reserved */
1697         arr[num++] = 0x0;
1698         arr[num++] = 0x2;       /* relative port 2 (secondary) */
1699         memset(arr + num, 0, 6);
1700         num += 6;
1701         arr[num++] = 0x0;
1702         arr[num++] = 12;        /* length tp descriptor */
1703         /* naa-5 target port identifier (B) */
1704         arr[num++] = 0x61;      /* proto=sas, binary */
1705         arr[num++] = 0x93;      /* PIV=1, target port, NAA */
1706         arr[num++] = 0x0;       /* reserved */
1707         arr[num++] = 0x8;       /* length */
1708         put_unaligned_be64(naa3_comp_a + port_b, arr + num);
1709         num += 8;
1710
1711         return num;
1712 }
1713
1714
/* Canned payload (bytes 4 onward) for the ATA Information VPD page;
 * appears to carry SAT-style signature/identify data for a fake
 * "linux SAT scsi_debug" device — consumed by inquiry_vpd_89().
 */
static unsigned char vpd89_data[] = {
/* from 4th byte */ 0,0,0,0,
'l','i','n','u','x',' ',' ',' ',
'S','A','T',' ','s','c','s','i','_','d','e','b','u','g',' ',' ',
'1','2','3','4',
0x34,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
0xec,0,0,0,
0x5a,0xc,0xff,0x3f,0x37,0xc8,0x10,0,0,0,0,0,0x3f,0,0,0,
0,0,0,0,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x58,0x20,0x20,0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0,0,0,0x40,0x4,0,0x2e,0x33,
0x38,0x31,0x20,0x20,0x20,0x20,0x54,0x53,0x38,0x33,0x30,0x30,0x33,0x31,
0x53,0x41,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x20,0x20,
0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,
0x10,0x80,
0,0,0,0x2f,0,0,0,0x2,0,0x2,0x7,0,0xff,0xff,0x1,0,
0x3f,0,0xc1,0xff,0x3e,0,0x10,0x1,0xb0,0xf8,0x50,0x9,0,0,0x7,0,
0x3,0,0x78,0,0x78,0,0xf0,0,0x78,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0x2,0,0,0,0,0,0,0,
0x7e,0,0x1b,0,0x6b,0x34,0x1,0x7d,0x3,0x40,0x69,0x34,0x1,0x3c,0x3,0x40,
0x7f,0x40,0,0,0,0,0xfe,0xfe,0,0,0,0,0,0xfe,0,0,
0,0,0,0,0,0,0,0,0xb0,0xf8,0x50,0x9,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0x1,0,0xb0,0xf8,0x50,0x9,0xb0,0xf8,0x50,0x9,0x20,0x20,0x2,0,0xb6,0x42,
0,0x80,0x8a,0,0x6,0x3c,0xa,0x3c,0xff,0xff,0xc6,0x7,0,0x1,0,0x8,
0xf0,0xf,0,0x10,0x2,0,0x30,0,0,0,0,0,0,0,0x6,0xfe,
0,0,0x2,0,0x50,0,0x8a,0,0x4f,0x95,0,0,0x21,0,0xb,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xa5,0x51,
};
1758
1759 /* ATA Information VPD page */
1760 static int inquiry_vpd_89(unsigned char *arr)
1761 {
1762         memcpy(arr, vpd89_data, sizeof(vpd89_data));
1763         return sizeof(vpd89_data);
1764 }
1765
1766
/* Default payload (bytes 4 onward) for the Block limits VPD page;
 * individual fields are overwritten by inquiry_vpd_b0().
 */
static unsigned char vpdb0_data[] = {
	/* from 4th byte */ 0,0,0,4, 0,0,0x4,0, 0,0,0,64,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
};
1773
/* Block limits VPD page (SBC-3).
 *
 * Fills arr (page payload, bytes 4 onward) starting from the canned
 * vpdb0_data defaults, then overwrites the fields that depend on module
 * parameters. Returns the fixed page length 0x3c.
 */
static int inquiry_vpd_b0(unsigned char *arr)
{
	unsigned int gran;

	memcpy(arr, vpdb0_data, sizeof(vpdb0_data));

	/* Optimal transfer length granularity: the larger of the physical
	 * block exponent and the optional opt_xferlen_exp override.
	 */
	if (sdebug_opt_xferlen_exp != 0 &&
	    sdebug_physblk_exp < sdebug_opt_xferlen_exp)
		gran = 1 << sdebug_opt_xferlen_exp;
	else
		gran = 1 << sdebug_physblk_exp;
	put_unaligned_be16(gran, arr + 2);

	/* Maximum Transfer Length: only advertised for stores > 0x400 sectors */
	if (sdebug_store_sectors > 0x400)
		put_unaligned_be32(sdebug_store_sectors, arr + 4);

	/* Optimal Transfer Length */
	put_unaligned_be32(sdebug_opt_blks, &arr[8]);

	if (sdebug_lbpu) {	/* UNMAP supported: advertise its limits */
		/* Maximum Unmap LBA Count */
		put_unaligned_be32(sdebug_unmap_max_blocks, &arr[16]);

		/* Maximum Unmap Block Descriptor Count */
		put_unaligned_be32(sdebug_unmap_max_desc, &arr[20]);
	}

	/* Unmap Granularity Alignment */
	if (sdebug_unmap_alignment) {
		put_unaligned_be32(sdebug_unmap_alignment, &arr[28]);
		arr[28] |= 0x80; /* UGAVALID */
	}

	/* Optimal Unmap Granularity */
	put_unaligned_be32(sdebug_unmap_granularity, &arr[24]);

	/* Maximum WRITE SAME Length */
	put_unaligned_be64(sdebug_write_same_length, &arr[32]);

	return 0x3c; /* Mandatory page length for Logical Block Provisioning */
}
1818
/* Block device characteristics VPD page (SBC-3) */
static int inquiry_vpd_b1(struct sdebug_dev_info *devip, unsigned char *arr)
{
	/* Medium rotation rate 0x0001 (non rotating, e.g. solid state),
	 * byte 3 low nibble = 5 (nominal form factor less than 1.8").
	 */
	static const unsigned char bdc_fixed[] = {0, 1, 0, 5};

	memset(arr, 0, 0x3c);
	memcpy(arr, bdc_fixed, sizeof(bdc_fixed));

	return 0x3c;
}
1830
1831 /* Logical block provisioning VPD page (SBC-4) */
1832 static int inquiry_vpd_b2(unsigned char *arr)
1833 {
1834         memset(arr, 0, 0x4);
1835         arr[0] = 0;                     /* threshold exponent */
1836         if (sdebug_lbpu)
1837                 arr[1] = 1 << 7;
1838         if (sdebug_lbpws)
1839                 arr[1] |= 1 << 6;
1840         if (sdebug_lbpws10)
1841                 arr[1] |= 1 << 5;
1842         if (sdebug_lbprz && scsi_debug_lbp())
1843                 arr[1] |= (sdebug_lbprz & 0x7) << 2;  /* sbc4r07 and later */
1844         /* anc_sup=0; dp=0 (no provisioning group descriptor) */
1845         /* minimum_percentage=0; provisioning_type=0 (unknown) */
1846         /* threshold_percentage=0 */
1847         return 0x4;
1848 }
1849
/* Zoned block device characteristics VPD page (ZBC mandatory).
 * Fills arr (page payload) from devip's zone configuration and module
 * state; returns the fixed page length 0x3c.
 */
static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
{
	memset(arr, 0, 0x3c);
	arr[0] = 0x1; /* set URSWRZ (unrestricted read in seq. wr req zone) */
	/*
	 * Set Optimal number of open sequential write preferred zones and
	 * Optimal number of non-sequentially written sequential write
	 * preferred zones fields to 'not reported' (0xffffffff). Leave other
	 * fields set to zero, apart from Max. number of open swrz_s field.
	 */
	put_unaligned_be32(0xffffffff, &arr[4]);
	put_unaligned_be32(0xffffffff, &arr[8]);
	/* Max number of open swrz_s: real value only for host-managed with a
	 * configured limit; otherwise 'not reported'.
	 */
	if (sdeb_zbc_model == BLK_ZONED_HM && devip->max_open)
		put_unaligned_be32(devip->max_open, &arr[12]);
	else
		put_unaligned_be32(0xffffffff, &arr[12]);
	if (devip->zcap < devip->zsize) {
		/* zone capacity < zone size: report constant zone start
		 * offset method and the zone size
		 */
		arr[19] = ZBC_CONSTANT_ZONE_START_OFFSET;
		put_unaligned_be64(devip->zsize, &arr[20]);
	} else {
		arr[19] = 0;
	}
	return 0x3c;
}
1875
#define SDEBUG_BLE_LEN_AFTER_B4 28	/* thus vpage 32 bytes long */

/* Stream counts advertised by this emulation; MAXIMUM_NUMBER_OF_STREAMS is
 * reported in the Block limits extension VPD page (inquiry_vpd_b7()).
 */
enum { MAXIMUM_NUMBER_OF_STREAMS = 6, PERMANENT_STREAM_COUNT = 5 };
1879
1880 /* Block limits extension VPD page (SBC-4) */
1881 static int inquiry_vpd_b7(unsigned char *arrb4)
1882 {
1883         memset(arrb4, 0, SDEBUG_BLE_LEN_AFTER_B4);
1884         arrb4[1] = 1; /* Reduced stream control support (RSCS) */
1885         put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS, &arrb4[2]);
1886         return SDEBUG_BLE_LEN_AFTER_B4;
1887 }
1888
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584

/* Respond to the INQUIRY command: one of the supported Vital Product Data
 * pages when the EVPD bit is set, otherwise the standard inquiry data.
 * Returns 0 on success, check_condition_result for a bad CDB field or an
 * unsupported VPD page, or DID_REQUEUE << 16 when the response buffer
 * cannot be allocated.
 */
static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char pq_pdt;
	unsigned char *arr;
	unsigned char *cmd = scp->cmnd;
	u32 alloc_len, n;
	int ret;
	bool have_wlun, is_disk, is_zbc, is_disk_zbc;

	alloc_len = get_unaligned_be16(cmd + 3);
	arr = kzalloc(SDEBUG_MAX_INQ_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	is_disk = (sdebug_ptype == TYPE_DISK);
	is_zbc = devip->zoned;
	is_disk_zbc = (is_disk || is_zbc);
	have_wlun = scsi_is_wlun(scp->device->lun);
	/* Byte 0: peripheral qualifier and peripheral device type */
	if (have_wlun)
		pq_pdt = TYPE_WLUN;	/* present, wlun */
	else if (sdebug_no_lun_0 && (devip->lun == SDEBUG_LUN_0_VAL))
		pq_pdt = 0x7f;	/* not present, PQ=3, PDT=0x1f */
	else
		pq_pdt = (sdebug_ptype & 0x1f);
	arr[0] = pq_pdt;
	if (0x2 & cmd[1]) {  /* CMDDT bit set */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 1);
		kfree(arr);
		return check_condition_result;
	} else if (0x1 & cmd[1]) {  /* EVPD bit set */
		int lu_id_num, port_group_id, target_dev_id;
		u32 len;
		char lu_id_str[6];
		int host_no = devip->sdbg_host->shost->host_no;

		arr[1] = cmd[2];	/* echo requested page code */
		port_group_id = (((host_no + 1) & 0x7f) << 8) +
		    (devip->channel & 0x7f);
		if (sdebug_vpd_use_hostno == 0)
			host_no = 0;	/* make ids host independent */
		lu_id_num = have_wlun ? -1 : (((host_no + 1) * 2000) +
			    (devip->target * 1000) + devip->lun);
		target_dev_id = ((host_no + 1) * 2000) +
				 (devip->target * 1000) - 3;
		len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
		if (0 == cmd[2]) { /* supported vital product data pages */
			n = 4;
			arr[n++] = 0x0;   /* this page */
			arr[n++] = 0x80;  /* unit serial number */
			arr[n++] = 0x83;  /* device identification */
			arr[n++] = 0x84;  /* software interface ident. */
			arr[n++] = 0x85;  /* management network addresses */
			arr[n++] = 0x86;  /* extended inquiry */
			arr[n++] = 0x87;  /* mode page policy */
			arr[n++] = 0x88;  /* SCSI ports */
			if (is_disk_zbc) {	  /* SBC or ZBC */
				arr[n++] = 0x89;  /* ATA information */
				arr[n++] = 0xb0;  /* Block limits */
				arr[n++] = 0xb1;  /* Block characteristics */
				if (is_disk)
					arr[n++] = 0xb2;  /* LB Provisioning */
				if (is_zbc)
					arr[n++] = 0xb6;  /* ZB dev. char. */
				arr[n++] = 0xb7;  /* Block limits extension */
			}
			arr[3] = n - 4;   /* number of supported VPD pages */
		} else if (0x80 == cmd[2]) { /* unit serial number */
			arr[3] = len;
			memcpy(&arr[4], lu_id_str, len);
		} else if (0x83 == cmd[2]) { /* device identification */
			arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
						target_dev_id, lu_id_num,
						lu_id_str, len,
						&devip->lu_name);
		} else if (0x84 == cmd[2]) { /* Software interface ident. */
			arr[3] = inquiry_vpd_84(&arr[4]);
		} else if (0x85 == cmd[2]) { /* Management network addresses */
			arr[3] = inquiry_vpd_85(&arr[4]);
		} else if (0x86 == cmd[2]) { /* extended inquiry */
			arr[3] = 0x3c;	/* number of following entries */
			if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
				arr[4] = 0x4;	/* SPT: GRD_CHK:1 */
			else if (have_dif_prot)
				arr[4] = 0x5;	/* SPT: GRD_CHK:1, REF_CHK:1 */
			else
				arr[4] = 0x0;	/* no protection stuff */
			/*
			 * GROUP_SUP=1; HEADSUP=1 (HEAD OF QUEUE); ORDSUP=1
			 * (ORDERED queuing); SIMPSUP=1 (SIMPLE queuing).
			 */
			arr[5] = 0x17;
		} else if (0x87 == cmd[2]) { /* mode page policy */
			arr[3] = 0x8;	/* number of following entries */
			arr[4] = 0x2;	/* disconnect-reconnect mp */
			arr[6] = 0x80;	/* mlus, shared */
			arr[8] = 0x18;	 /* protocol specific lu */
			arr[10] = 0x82;	 /* mlus, per initiator port */
		} else if (0x88 == cmd[2]) { /* SCSI Ports */
			arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
		} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
			n = inquiry_vpd_89(&arr[4]);
			/* page 0x89 uses a 2 byte page length field */
			put_unaligned_be16(n, arr + 2);
		} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
			arr[3] = inquiry_vpd_b0(&arr[4]);
		} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
			arr[3] = inquiry_vpd_b1(devip, &arr[4]);
		} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
			arr[3] = inquiry_vpd_b2(&arr[4]);
		} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
			arr[3] = inquiry_vpd_b6(devip, &arr[4]);
		} else if (cmd[2] == 0xb7) { /* block limits extension page */
			arr[3] = inquiry_vpd_b7(&arr[4]);
		} else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
			kfree(arr);
			return check_condition_result;
		}
		/* clip response to alloc_len and to our buffer size */
		len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
		ret = fill_from_dev_buffer(scp, arr,
			    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
		kfree(arr);
		return ret;
	}
	/* drops through here for a standard inquiry */
	arr[1] = sdebug_removable ? 0x80 : 0;	/* Removable disk */
	arr[2] = sdebug_scsi_level;
	arr[3] = 2;    /* response_data_format==2 */
	arr[4] = SDEBUG_LONG_INQ_SZ - 5;	/* additional length */
	arr[5] = (int)have_dif_prot;	/* PROTECT bit */
	if (sdebug_vpd_use_hostno == 0)
		arr[5] |= 0x10; /* claim: implicit TPGS */
	arr[6] = 0x10; /* claim: MultiP */
	/* arr[6] |= 0x40; ... claim: EncServ (enclosure services) */
	arr[7] = 0xa; /* claim: LINKED + CMDQUE */
	memcpy(&arr[8], sdebug_inq_vendor_id, 8);
	memcpy(&arr[16], sdebug_inq_product_id, 16);
	memcpy(&arr[32], sdebug_inq_product_rev, 4);
	/* Use Vendor Specific area to place driver date in ASCII hex */
	memcpy(&arr[36], sdebug_version_date, 8);
	/* version descriptors (2 bytes each) follow */
	put_unaligned_be16(0xc0, arr + 58);   /* SAM-6 no version claimed */
	put_unaligned_be16(0x5c0, arr + 60);  /* SPC-5 no version claimed */
	n = 62;
	if (is_disk) {		/* SBC-4 no version claimed */
		put_unaligned_be16(0x600, arr + n);
		n += 2;
	} else if (sdebug_ptype == TYPE_TAPE) {	/* SSC-4 rev 3 */
		put_unaligned_be16(0x525, arr + n);
		n += 2;
	} else if (is_zbc) {	/* ZBC BSR INCITS 536 revision 05 */
		put_unaligned_be16(0x624, arr + n);
		n += 2;
	}
	put_unaligned_be16(0x2100, arr + n);	/* SPL-4 no version claimed */
	ret = fill_from_dev_buffer(scp, arr,
			    min_t(u32, alloc_len, SDEBUG_LONG_INQ_SZ));
	kfree(arr);
	return ret;
}
2050
/* See resp_iec_m_pg() for how this data is manipulated */
/* Byte 2 bit 0x04 is the TEST flag and the low nibble of byte 3 is MRIE:
 * resp_requests() reports THRESHOLD EXCEEDED when TEST=1 and MRIE=6.
 */
static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
				   0, 0, 0x0, 0x0};
2054
2055 static int resp_requests(struct scsi_cmnd *scp,
2056                          struct sdebug_dev_info *devip)
2057 {
2058         unsigned char *cmd = scp->cmnd;
2059         unsigned char arr[SCSI_SENSE_BUFFERSIZE];       /* assume >= 18 bytes */
2060         bool dsense = !!(cmd[1] & 1);
2061         u32 alloc_len = cmd[4];
2062         u32 len = 18;
2063         int stopped_state = atomic_read(&devip->stopped);
2064
2065         memset(arr, 0, sizeof(arr));
2066         if (stopped_state > 0) {        /* some "pollable" data [spc6r02: 5.12.2] */
2067                 if (dsense) {
2068                         arr[0] = 0x72;
2069                         arr[1] = NOT_READY;
2070                         arr[2] = LOGICAL_UNIT_NOT_READY;
2071                         arr[3] = (stopped_state == 2) ? 0x1 : 0x2;
2072                         len = 8;
2073                 } else {
2074                         arr[0] = 0x70;
2075                         arr[2] = NOT_READY;             /* NO_SENSE in sense_key */
2076                         arr[7] = 0xa;                   /* 18 byte sense buffer */
2077                         arr[12] = LOGICAL_UNIT_NOT_READY;
2078                         arr[13] = (stopped_state == 2) ? 0x1 : 0x2;
2079                 }
2080         } else if ((iec_m_pg[2] & 0x4) && (6 == (iec_m_pg[3] & 0xf))) {
2081                 /* Information exceptions control mode page: TEST=1, MRIE=6 */
2082                 if (dsense) {
2083                         arr[0] = 0x72;
2084                         arr[1] = 0x0;           /* NO_SENSE in sense_key */
2085                         arr[2] = THRESHOLD_EXCEEDED;
2086                         arr[3] = 0xff;          /* Failure prediction(false) */
2087                         len = 8;
2088                 } else {
2089                         arr[0] = 0x70;
2090                         arr[2] = 0x0;           /* NO_SENSE in sense_key */
2091                         arr[7] = 0xa;           /* 18 byte sense buffer */
2092                         arr[12] = THRESHOLD_EXCEEDED;
2093                         arr[13] = 0xff;         /* Failure prediction(false) */
2094                 }
2095         } else {        /* nothing to report */
2096                 if (dsense) {
2097                         len = 8;
2098                         memset(arr, 0, len);
2099                         arr[0] = 0x72;
2100                 } else {
2101                         memset(arr, 0, len);
2102                         arr[0] = 0x70;
2103                         arr[7] = 0xa;
2104                 }
2105         }
2106         return fill_from_dev_buffer(scp, arr, min_t(u32, len, alloc_len));
2107 }
2108
/* START STOP UNIT. Rejects any non-zero POWER CONDITION field, honors the
 * tur_ms_to_ready startup delay (devip->stopped == 2), and returns
 * SDEG_RES_IMMED_MASK when the command should complete immediately (state
 * unchanged or IMMED bit set), 0 otherwise, or check_condition_result on
 * error.
 */
static int resp_start_stop(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	int power_cond, want_stop, stopped_state;
	bool changing;

	power_cond = (cmd[4] & 0xf0) >> 4;
	if (power_cond) {	/* only POWER CONDITION 0 is supported */
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 7);
		return check_condition_result;
	}
	want_stop = !(cmd[4] & 1);	/* START bit clear means stop */
	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {	/* 2: waiting for tur_ms_to_ready */
		ktime_t now_ts = ktime_get_boottime();

		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			u64 diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));

			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				stopped_state = 0;
			}
		}
		if (stopped_state == 2) {
			if (want_stop) {
				stopped_state = 1;	/* dummy up success */
			} else {	/* Disallow tur_ms_to_ready delay to be overridden */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, 0 /* START bit */);
				return check_condition_result;
			}
		}
	}
	changing = (stopped_state != want_stop);
	if (changing)
		atomic_xchg(&devip->stopped, want_stop);
	if (!changing || (cmd[1] & 0x1))  /* state unchanged or IMMED bit set in cdb */
		return SDEG_RES_IMMED_MASK;
	else
		return 0;
}
2151
2152 static sector_t get_sdebug_capacity(void)
2153 {
2154         static const unsigned int gibibyte = 1073741824;
2155
2156         if (sdebug_virtual_gb > 0)
2157                 return (sector_t)sdebug_virtual_gb *
2158                         (gibibyte / sdebug_sector_size);
2159         else
2160                 return sdebug_store_sectors;
2161 }
2162
2163 #define SDEBUG_READCAP_ARR_SZ 8
2164 static int resp_readcap(struct scsi_cmnd *scp,
2165                         struct sdebug_dev_info *devip)
2166 {
2167         unsigned char arr[SDEBUG_READCAP_ARR_SZ];
2168         unsigned int capac;
2169
2170         /* following just in case virtual_gb changed */
2171         sdebug_capacity = get_sdebug_capacity();
2172         memset(arr, 0, SDEBUG_READCAP_ARR_SZ);
2173         if (sdebug_capacity < 0xffffffff) {
2174                 capac = (unsigned int)sdebug_capacity - 1;
2175                 put_unaligned_be32(capac, arr + 0);
2176         } else
2177                 put_unaligned_be32(0xffffffff, arr + 0);
2178         put_unaligned_be16(sdebug_sector_size, arr + 6);
2179         return fill_from_dev_buffer(scp, arr, SDEBUG_READCAP_ARR_SZ);
2180 }
2181
2182 #define SDEBUG_READCAP16_ARR_SZ 32
2183 static int resp_readcap16(struct scsi_cmnd *scp,
2184                           struct sdebug_dev_info *devip)
2185 {
2186         unsigned char *cmd = scp->cmnd;
2187         unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
2188         u32 alloc_len;
2189
2190         alloc_len = get_unaligned_be32(cmd + 10);
2191         /* following just in case virtual_gb changed */
2192         sdebug_capacity = get_sdebug_capacity();
2193         memset(arr, 0, SDEBUG_READCAP16_ARR_SZ);
2194         put_unaligned_be64((u64)(sdebug_capacity - 1), arr + 0);
2195         put_unaligned_be32(sdebug_sector_size, arr + 8);
2196         arr[13] = sdebug_physblk_exp & 0xf;
2197         arr[14] = (sdebug_lowest_aligned >> 8) & 0x3f;
2198
2199         if (scsi_debug_lbp()) {
2200                 arr[14] |= 0x80; /* LBPME */
2201                 /* from sbc4r07, this LBPRZ field is 1 bit, but the LBPRZ in
2202                  * the LB Provisioning VPD page is 3 bits. Note that lbprz=2
2203                  * in the wider field maps to 0 in this field.
2204                  */
2205                 if (sdebug_lbprz & 1)   /* precisely what the draft requires */
2206                         arr[14] |= 0x40;
2207         }
2208
2209         /*
2210          * Since the scsi_debug READ CAPACITY implementation always reports the
2211          * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices.
2212          */
2213         if (devip->zoned)
2214                 arr[12] |= 1 << 4;
2215
2216         arr[15] = sdebug_lowest_aligned & 0xff;
2217
2218         if (have_dif_prot) {
2219                 arr[12] = (sdebug_dif - 1) << 1; /* P_TYPE */
2220                 arr[12] |= 1; /* PROT_EN */
2221         }
2222
2223         return fill_from_dev_buffer(scp, arr,
2224                             min_t(u32, alloc_len, SDEBUG_READCAP16_ARR_SZ));
2225 }
2226
#define SDEBUG_MAX_TGTPGS_ARR_SZ 1412

static int resp_report_tgtpgs(struct scsi_cmnd *scp,
			      struct sdebug_dev_info *devip)
{
	/*
	 * Respond to REPORT TARGET PORT GROUPS. Builds two one-port
	 * groups: group A (usable) and group B (unavailable), matching
	 * the two relative ports advertised in VPD page 0x88.
	 */
	unsigned char *cmd = scp->cmnd;
	unsigned char *arr;
	int host_no = devip->sdbg_host->shost->host_no;
	int port_group_a, port_group_b, port_a, port_b;
	u32 alen, n, rlen;
	int ret;

	alen = get_unaligned_be32(cmd + 6);	/* ALLOCATION LENGTH */
	arr = kzalloc(SDEBUG_MAX_TGTPGS_ARR_SZ, GFP_ATOMIC);
	if (! arr)
		return DID_REQUEUE << 16;
	/*
	 * EVPD page 0x88 states we have two ports, one
	 * real and a fake port with no device connected.
	 * So we create two port groups with one port each
	 * and set the group with port B to unavailable.
	 */
	port_a = 0x1; /* relative port A */
	port_b = 0x2; /* relative port B */
	port_group_a = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f);
	port_group_b = (((host_no + 1) & 0x7f) << 8) +
			(devip->channel & 0x7f) + 0x80;

	/*
	 * The asymmetric access state is cycled according to the host_id.
	 */
	n = 4;	/* leave room for the 4-byte returned-data-length header */
	if (sdebug_vpd_use_hostno == 0) {
		arr[n++] = host_no % 3; /* Asymm access state */
		arr[n++] = 0x0F; /* claim: all states are supported */
	} else {
		arr[n++] = 0x0; /* Active/Optimized path */
		arr[n++] = 0x01; /* only support active/optimized paths */
	}
	put_unaligned_be16(port_group_a, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_a, arr + n);
	n += 2;
	arr[n++] = 3;    /* Port unavailable */
	arr[n++] = 0x08; /* claim: only unavailable paths are supported */
	put_unaligned_be16(port_group_b, arr + n);
	n += 2;
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Status code */
	arr[n++] = 0;    /* Vendor unique */
	arr[n++] = 0x1;  /* One port per group */
	arr[n++] = 0;    /* Reserved */
	arr[n++] = 0;    /* Reserved */
	put_unaligned_be16(port_b, arr + n);
	n += 2;

	rlen = n - 4;	/* returned data length excludes the 4-byte header */
	put_unaligned_be32(rlen, arr + 0);

	/*
	 * Return the smallest value of either
	 * - The allocated length
	 * - The constructed command length
	 * - The maximum array size
	 */
	rlen = min(alen, n);
	ret = fill_from_dev_buffer(scp, arr,
			   min_t(u32, rlen, SDEBUG_MAX_TGTPGS_ARR_SZ));
	kfree(arr);
	return ret;
}
2305
static int resp_rsup_opcodes(struct scsi_cmnd *scp,
			     struct sdebug_dev_info *devip)
{
	/*
	 * Respond to REPORT SUPPORTED OPERATION CODES. Reporting options
	 * 0 (all commands), 1 (single opcode), 2 (opcode + service action)
	 * and 3 (opcode, plus sa when the opcode uses one) are implemented,
	 * driven by the opcode_info_arr[] table.
	 */
	bool rctd;		/* RCTD: include command timeouts descriptors */
	u8 reporting_opts, req_opcode, sdeb_i, supp;
	u16 req_sa, u;
	u32 alloc_len, a_len;
	int k, offset, len, errsts, count, bump, na;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	u8 *arr;
	u8 *cmd = scp->cmnd;

	rctd = !!(cmd[2] & 0x80);
	reporting_opts = cmd[2] & 0x7;
	req_opcode = cmd[3];
	req_sa = get_unaligned_be16(cmd + 4);
	alloc_len = get_unaligned_be32(cmd + 6);
	if (alloc_len < 4 || alloc_len > 0xffff) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}
	if (alloc_len > 8192)
		a_len = 8192;	/* clamp the working buffer size */
	else
		a_len = alloc_len;
	arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	switch (reporting_opts) {
	case 0: /* all commands */
		/* count number of commands */
		for (count = 0, oip = opcode_info_arr;
		     oip->num_attached != 0xff; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			count += (oip->num_attached + 1);
		}
		/* each descriptor is 8 bytes, or 20 when RCTD is set */
		bump = rctd ? 20 : 8;
		put_unaligned_be32(count * bump, arr);
		for (offset = 4, oip = opcode_info_arr;
		     oip->num_attached != 0xff && offset < a_len; ++oip) {
			if (F_INV_OP & oip->flags)
				continue;
			na = oip->num_attached;
			arr[offset] = oip->opcode;
			put_unaligned_be16(oip->sa, arr + offset + 2);
			if (rctd)
				arr[offset + 5] |= 0x2;	/* timeouts descr present */
			if (FF_SA & oip->flags)
				arr[offset + 5] |= 0x1;	/* service action valid */
			put_unaligned_be16(oip->len_mask[0], arr + offset + 6);
			if (rctd)
				put_unaligned_be16(0xa, arr + offset + 8);
			r_oip = oip;
			/* emit the service-action variants attached to oip */
			for (k = 0, oip = oip->arrp; k < na; ++k, ++oip) {
				if (F_INV_OP & oip->flags)
					continue;
				offset += bump;
				arr[offset] = oip->opcode;
				put_unaligned_be16(oip->sa, arr + offset + 2);
				if (rctd)
					arr[offset + 5] |= 0x2;
				if (FF_SA & oip->flags)
					arr[offset + 5] |= 0x1;
				put_unaligned_be16(oip->len_mask[0],
						   arr + offset + 6);
				if (rctd)
					put_unaligned_be16(0xa,
							   arr + offset + 8);
			}
			oip = r_oip;	/* restore outer-loop cursor */
			offset += bump;
		}
		break;
	case 1: /* one command: opcode only */
	case 2: /* one command: opcode plus service action */
	case 3: /* one command: if sa==0 then opcode only else opcode+sa */
		sdeb_i = opcode_ind_arr[req_opcode];
		oip = &opcode_info_arr[sdeb_i];
		if (F_INV_OP & oip->flags) {
			supp = 1;	/* not supported */
			offset = 4;
		} else {
			if (1 == reporting_opts) {
				if (FF_SA & oip->flags) {
					/* opcode requires a service action */
					mk_sense_invalid_fld(scp, SDEB_IN_CDB,
							     2, 2);
					kfree(arr);
					return check_condition_result;
				}
				req_sa = 0;
			} else if (2 == reporting_opts &&
				   0 == (FF_SA & oip->flags)) {
				/* point at requested sa */
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
				kfree(arr);
				return check_condition_result;
			}
			if (0 == (FF_SA & oip->flags) &&
			    req_opcode == oip->opcode)
				supp = 3;	/* supported per standard */
			else if (0 == (FF_SA & oip->flags)) {
				/* search the attached entries for the opcode */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_opcode == oip->opcode)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else if (req_sa != oip->sa) {
				/* search attached entries for the requested sa */
				na = oip->num_attached;
				for (k = 0, oip = oip->arrp; k < na;
				     ++k, ++oip) {
					if (req_sa == oip->sa)
						break;
				}
				supp = (k >= na) ? 1 : 3;
			} else
				supp = 3;
			if (3 == supp) {
				/* append the cdb usage (length/mask) data */
				u = oip->len_mask[0];
				put_unaligned_be16(u, arr + 2);
				arr[4] = oip->opcode;
				for (k = 1; k < u; ++k)
					arr[4 + k] = (k < 16) ?
						 oip->len_mask[k] : 0xff;
				offset = 4 + u;
			} else
				offset = 4;
		}
		arr[1] = (rctd ? 0x80 : 0) | supp;
		if (rctd) {
			put_unaligned_be16(0xa, arr + offset);
			offset += 12;
		}
		break;
	default:
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		kfree(arr);
		return check_condition_result;
	}
	offset = (offset < a_len) ? offset : a_len;
	len = (offset < alloc_len) ? offset : alloc_len;
	errsts = fill_from_dev_buffer(scp, arr, len);
	kfree(arr);
	return errsts;
}
2456
2457 static int resp_rsup_tmfs(struct scsi_cmnd *scp,
2458                           struct sdebug_dev_info *devip)
2459 {
2460         bool repd;
2461         u32 alloc_len, len;
2462         u8 arr[16];
2463         u8 *cmd = scp->cmnd;
2464
2465         memset(arr, 0, sizeof(arr));
2466         repd = !!(cmd[2] & 0x80);
2467         alloc_len = get_unaligned_be32(cmd + 6);
2468         if (alloc_len < 4) {
2469                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
2470                 return check_condition_result;
2471         }
2472         arr[0] = 0xc8;          /* ATS | ATSS | LURS */
2473         arr[1] = 0x1;           /* ITNRS */
2474         if (repd) {
2475                 arr[3] = 0xc;
2476                 len = 16;
2477         } else
2478                 len = 4;
2479
2480         len = (len < alloc_len) ? len : alloc_len;
2481         return fill_from_dev_buffer(scp, arr, len);
2482 }
2483
2484 /* <<Following mode page info copied from ST318451LW>> */
2485
static int resp_err_recov_pg(unsigned char *p, int pcontrol, int target)
{	/* Read-Write Error Recovery page for mode_sense */
	/*
	 * The template is immutable, so make it static const instead of
	 * rebuilding it on the stack on every call. pcontrol == 1 asks for
	 * changeable values: none are changeable, so zero all but the header.
	 */
	static const unsigned char err_recov_pg[] = {0x1, 0xa, 0xc0, 11, 240,
						     0, 0, 0, 5, 0, 0xff, 0xff};

	memcpy(p, err_recov_pg, sizeof(err_recov_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(err_recov_pg) - 2);
	return sizeof(err_recov_pg);
}
2496
static int resp_disconnect_pg(unsigned char *p, int pcontrol, int target)
{	/* Disconnect-Reconnect page for mode_sense */
	/*
	 * Immutable template: static const avoids re-initializing the array
	 * on the stack for every MODE SENSE. pcontrol == 1 (changeable
	 * values) reports everything after the header as zero.
	 */
	static const unsigned char disconnect_pg[] = {0x2, 0xe, 128, 128,
						      0, 10, 0, 0,
						      0, 0, 0, 0, 0, 0, 0, 0};

	memcpy(p, disconnect_pg, sizeof(disconnect_pg));
	if (1 == pcontrol)
		memset(p + 2, 0, sizeof(disconnect_pg) - 2);
	return sizeof(disconnect_pg);
}
2507
2508 static int resp_format_pg(unsigned char *p, int pcontrol, int target)
2509 {       /* Format device page for mode_sense */
2510         unsigned char format_pg[] = {0x3, 0x16, 0, 0, 0, 0, 0, 0,
2511                                      0, 0, 0, 0, 0, 0, 0, 0,
2512                                      0, 0, 0, 0, 0x40, 0, 0, 0};
2513
2514         memcpy(p, format_pg, sizeof(format_pg));
2515         put_unaligned_be16(sdebug_sectors_per, p + 10);
2516         put_unaligned_be16(sdebug_sector_size, p + 12);
2517         if (sdebug_removable)
2518                 p[20] |= 0x20; /* should agree with INQUIRY */
2519         if (1 == pcontrol)
2520                 memset(p + 2, 0, sizeof(format_pg) - 2);
2521         return sizeof(format_pg);
2522 }
2523
/*
 * Caching mode page (0x8) current values; resp_caching_pg() may clear the
 * WCE bit in byte 2 at runtime when SDEBUG_OPT_N_WCE is set in sdebug_opts.
 */
static unsigned char caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
				     0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,
				     0, 0, 0, 0};
2527
2528 static int resp_caching_pg(unsigned char *p, int pcontrol, int target)
2529 {       /* Caching page for mode_sense */
2530         unsigned char ch_caching_pg[] = {/* 0x8, 18, */ 0x4, 0, 0, 0, 0, 0,
2531                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
2532         unsigned char d_caching_pg[] = {0x8, 18, 0x14, 0, 0xff, 0xff, 0, 0,
2533                 0xff, 0xff, 0xff, 0xff, 0x80, 0x14, 0, 0,     0, 0, 0, 0};
2534
2535         if (SDEBUG_OPT_N_WCE & sdebug_opts)
2536                 caching_pg[2] &= ~0x4;  /* set WCE=0 (default WCE=1) */
2537         memcpy(p, caching_pg, sizeof(caching_pg));
2538         if (1 == pcontrol)
2539                 memcpy(p + 2, ch_caching_pg, sizeof(ch_caching_pg));
2540         else if (2 == pcontrol)
2541                 memcpy(p, d_caching_pg, sizeof(d_caching_pg));
2542         return sizeof(caching_pg);
2543 }
2544
/*
 * Control mode page (0xa) current values; resp_ctrl_m_pg() updates the
 * D_SENSE and ATO bits at runtime from sdebug_dsense and sdebug_ato.
 */
static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
				    0, 0, 0x2, 0x4b};
2547
2548 static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
2549 {       /* Control mode page for mode_sense */
2550         unsigned char ch_ctrl_m_pg[] = {/* 0xa, 10, */ 0x6, 0, 0, 0, 0, 0,
2551                                         0, 0, 0, 0};
2552         unsigned char d_ctrl_m_pg[] = {0xa, 10, 2, 0, 0, 0, 0, 0,
2553                                      0, 0, 0x2, 0x4b};
2554
2555         if (sdebug_dsense)
2556                 ctrl_m_pg[2] |= 0x4;
2557         else
2558                 ctrl_m_pg[2] &= ~0x4;
2559
2560         if (sdebug_ato)
2561                 ctrl_m_pg[5] |= 0x80; /* ATO=1 */
2562
2563         memcpy(p, ctrl_m_pg, sizeof(ctrl_m_pg));
2564         if (1 == pcontrol)
2565                 memcpy(p + 2, ch_ctrl_m_pg, sizeof(ch_ctrl_m_pg));
2566         else if (2 == pcontrol)
2567                 memcpy(p, d_ctrl_m_pg, sizeof(d_ctrl_m_pg));
2568         return sizeof(ctrl_m_pg);
2569 }
2570
/* IO Advice Hints Grouping mode page */
static int resp_grouping_m_pg(unsigned char *p, int pcontrol, int target)
{
	/* IO Advice Hints Grouping mode page */
	struct grouping_m_pg {
		u8 page_code;	/* OR 0x40 when subpage_code > 0 */
		u8 subpage_code;
		__be16 page_length;
		u8 reserved[12];
		struct scsi_io_group_descriptor descr[MAXIMUM_NUMBER_OF_STREAMS];
	};
	static const struct grouping_m_pg gr_m_pg = {
		.page_code = 0xa | 0x40,	/* page 0xa with SPF (subpage) bit */
		.subpage_code = 5,
		.page_length = cpu_to_be16(sizeof(gr_m_pg) - 4),
		.descr = {
			/* st_enble set for all but the last descriptor */
			{ .st_enble = 1 },
			{ .st_enble = 1 },
			{ .st_enble = 1 },
			{ .st_enble = 1 },
			{ .st_enble = 1 },
			{ .st_enble = 0 },
		}
	};

	/* catch descriptor layout changes that would alter the page size */
	BUILD_BUG_ON(sizeof(struct grouping_m_pg) !=
		     16 + MAXIMUM_NUMBER_OF_STREAMS * 16);
	memcpy(p, &gr_m_pg, sizeof(gr_m_pg));
	if (1 == pcontrol) {
		/* There are no changeable values so clear from byte 4 on. */
		memset(p + 4, 0, sizeof(gr_m_pg) - 4);
	}
	return sizeof(gr_m_pg);
}
2605
2606 static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
2607 {       /* Informational Exceptions control mode page for mode_sense */
2608         unsigned char ch_iec_m_pg[] = {/* 0x1c, 0xa, */ 0x4, 0xf, 0, 0, 0, 0,
2609                                        0, 0, 0x0, 0x0};
2610         unsigned char d_iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,
2611                                       0, 0, 0x0, 0x0};
2612
2613         memcpy(p, iec_m_pg, sizeof(iec_m_pg));
2614         if (1 == pcontrol)
2615                 memcpy(p + 2, ch_iec_m_pg, sizeof(ch_iec_m_pg));
2616         else if (2 == pcontrol)
2617                 memcpy(p, d_iec_m_pg, sizeof(d_iec_m_pg));
2618         return sizeof(iec_m_pg);
2619 }
2620
static int resp_sas_sf_m_pg(unsigned char *p, int pcontrol, int target)
{	/* SAS SSP mode page - short format for mode_sense */
	unsigned char spg[] = {0x19, 0x6, 0x6, 0x0, 0x7, 0xd0, 0x0, 0x0};

	memcpy(p, spg, sizeof(spg));
	/* pcontrol == 1 (changeable values): zero all but the header */
	if (pcontrol == 1)
		memset(p + 2, 0, sizeof(spg) - 2);
	return sizeof(spg);
}
2631
2632
static int resp_sas_pcd_m_spg(unsigned char *p, int pcontrol, int target,
			      int target_dev_id)
{	/* SAS phy control and discover mode page for mode_sense */
	/* Template holds two 48-byte phy descriptors starting at byte 8;
	 * the SAS address slots are patched in below before the copy. */
	unsigned char sas_pcd_m_pg[] = {0x59, 0x1, 0, 0x64, 0, 0x6, 0, 2,
		    0, 0, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x2, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		    0, 1, 0, 0, 0x10, 0x9, 0x8, 0x0,
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0, 0, 0, 0, 0, 0, 0, 0,	/* insert SAS addr */
		    0x3, 0, 0, 0, 0, 0, 0, 0,
		    0x88, 0x99, 0, 0, 0, 0, 0, 0,
		    0, 0, 0, 0, 0, 0, 0, 0,
		};
	int port_a, port_b;

	/* NAA-3 identifiers for the two phys (one pair per descriptor) */
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 16);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 24);
	put_unaligned_be64(naa3_comp_a, sas_pcd_m_pg + 64);
	put_unaligned_be64(naa3_comp_c + 1, sas_pcd_m_pg + 72);
	/* per-target port identifiers derived from target_dev_id */
	port_a = target_dev_id + 1;
	port_b = port_a + 1;
	memcpy(p, sas_pcd_m_pg, sizeof(sas_pcd_m_pg));
	put_unaligned_be32(port_a, p + 20);
	put_unaligned_be32(port_b, p + 48 + 20);
	if (1 == pcontrol)	/* changeable values: none beyond the header */
		memset(p + 4, 0, sizeof(sas_pcd_m_pg) - 4);
	return sizeof(sas_pcd_m_pg);
}
2665
static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
{	/* SAS SSP shared protocol specific port mode subpage */
	unsigned char spg[] = {0x59, 0x2, 0, 0xc, 0, 0x6, 0x10, 0,
			       0, 0, 0, 0, 0, 0, 0, 0,
		};

	memcpy(p, spg, sizeof(spg));
	/* pcontrol == 1 (changeable values): zero everything past the header */
	if (pcontrol == 1)
		memset(p + 4, 0, sizeof(spg) - 4);
	return sizeof(spg);
}
2677
2678 /* PAGE_SIZE is more than necessary but provides room for future expansion. */
2679 #define SDEBUG_MAX_MSENSE_SZ PAGE_SIZE
2680
2681 static int resp_mode_sense(struct scsi_cmnd *scp,
2682                            struct sdebug_dev_info *devip)
2683 {
2684         int pcontrol, pcode, subpcode, bd_len;
2685         unsigned char dev_spec;
2686         u32 alloc_len, offset, len;
2687         int target_dev_id;
2688         int target = scp->device->id;
2689         unsigned char *ap;
2690         unsigned char *arr __free(kfree);
2691         unsigned char *cmd = scp->cmnd;
2692         bool dbd, llbaa, msense_6, is_disk, is_zbc;
2693
2694         arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
2695         if (!arr)
2696                 return -ENOMEM;
2697         dbd = !!(cmd[1] & 0x8);         /* disable block descriptors */
2698         pcontrol = (cmd[2] & 0xc0) >> 6;
2699         pcode = cmd[2] & 0x3f;
2700         subpcode = cmd[3];
2701         msense_6 = (MODE_SENSE == cmd[0]);
2702         llbaa = msense_6 ? false : !!(cmd[1] & 0x10);
2703         is_disk = (sdebug_ptype == TYPE_DISK);
2704         is_zbc = devip->zoned;
2705         if ((is_disk || is_zbc) && !dbd)
2706                 bd_len = llbaa ? 16 : 8;
2707         else
2708                 bd_len = 0;
2709         alloc_len = msense_6 ? cmd[4] : get_unaligned_be16(cmd + 7);
2710         memset(arr, 0, SDEBUG_MAX_MSENSE_SZ);
2711         if (0x3 == pcontrol) {  /* Saving values not supported */
2712                 mk_sense_buffer(scp, ILLEGAL_REQUEST, SAVING_PARAMS_UNSUP, 0);
2713                 return check_condition_result;
2714         }
2715         target_dev_id = ((devip->sdbg_host->shost->host_no + 1) * 2000) +
2716                         (devip->target * 1000) - 3;
2717         /* for disks+zbc set DPOFUA bit and clear write protect (WP) bit */
2718         if (is_disk || is_zbc) {
2719                 dev_spec = 0x10;        /* =0x90 if WP=1 implies read-only */
2720                 if (sdebug_wp)
2721                         dev_spec |= 0x80;
2722         } else
2723                 dev_spec = 0x0;
2724         if (msense_6) {
2725                 arr[2] = dev_spec;
2726                 arr[3] = bd_len;
2727                 offset = 4;
2728         } else {
2729                 arr[3] = dev_spec;
2730                 if (16 == bd_len)
2731                         arr[4] = 0x1;   /* set LONGLBA bit */
2732                 arr[7] = bd_len;        /* assume 255 or less */
2733                 offset = 8;
2734         }
2735         ap = arr + offset;
2736         if ((bd_len > 0) && (!sdebug_capacity))
2737                 sdebug_capacity = get_sdebug_capacity();
2738
2739         if (8 == bd_len) {
2740                 if (sdebug_capacity > 0xfffffffe)
2741                         put_unaligned_be32(0xffffffff, ap + 0);
2742                 else
2743                         put_unaligned_be32(sdebug_capacity, ap + 0);
2744                 put_unaligned_be16(sdebug_sector_size, ap + 6);
2745                 offset += bd_len;
2746                 ap = arr + offset;
2747         } else if (16 == bd_len) {
2748                 put_unaligned_be64((u64)sdebug_capacity, ap + 0);
2749                 put_unaligned_be32(sdebug_sector_size, ap + 12);
2750                 offset += bd_len;
2751                 ap = arr + offset;
2752         }
2753
2754         /*
2755          * N.B. If len>0 before resp_*_pg() call, then form of that call should be:
2756          *        len += resp_*_pg(ap + len, pcontrol, target);
2757          */
2758         switch (pcode) {
2759         case 0x1:       /* Read-Write error recovery page, direct access */
2760                 if (subpcode > 0x0 && subpcode < 0xff)
2761                         goto bad_subpcode;
2762                 len = resp_err_recov_pg(ap, pcontrol, target);
2763                 offset += len;
2764                 break;
2765         case 0x2:       /* Disconnect-Reconnect page, all devices */
2766                 if (subpcode > 0x0 && subpcode < 0xff)
2767                         goto bad_subpcode;
2768                 len = resp_disconnect_pg(ap, pcontrol, target);
2769                 offset += len;
2770                 break;
2771         case 0x3:       /* Format device page, direct access */
2772                 if (subpcode > 0x0 && subpcode < 0xff)
2773                         goto bad_subpcode;
2774                 if (is_disk) {
2775                         len = resp_format_pg(ap, pcontrol, target);
2776                         offset += len;
2777                 } else {
2778                         goto bad_pcode;
2779                 }
2780                 break;
2781         case 0x8:       /* Caching page, direct access */
2782                 if (subpcode > 0x0 && subpcode < 0xff)
2783                         goto bad_subpcode;
2784                 if (is_disk || is_zbc) {
2785                         len = resp_caching_pg(ap, pcontrol, target);
2786                         offset += len;
2787                 } else {
2788                         goto bad_pcode;
2789                 }
2790                 break;
2791         case 0xa:       /* Control Mode page, all devices */
2792                 switch (subpcode) {
2793                 case 0:
2794                         len = resp_ctrl_m_pg(ap, pcontrol, target);
2795                         break;
2796                 case 0x05:
2797                         len = resp_grouping_m_pg(ap, pcontrol, target);
2798                         break;
2799                 case 0xff:
2800                         len = resp_ctrl_m_pg(ap, pcontrol, target);
2801                         len += resp_grouping_m_pg(ap + len, pcontrol, target);
2802                         break;
2803                 default:
2804                         goto bad_subpcode;
2805                 }
2806                 offset += len;
2807                 break;
2808         case 0x19:      /* if spc==1 then sas phy, control+discover */
2809                 if (subpcode > 0x2 && subpcode < 0xff)
2810                         goto bad_subpcode;
2811                 len = 0;
2812                 if ((0x0 == subpcode) || (0xff == subpcode))
2813                         len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2814                 if ((0x1 == subpcode) || (0xff == subpcode))
2815                         len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2816                                                   target_dev_id);
2817                 if ((0x2 == subpcode) || (0xff == subpcode))
2818                         len += resp_sas_sha_m_spg(ap + len, pcontrol);
2819                 offset += len;
2820                 break;
2821         case 0x1c:      /* Informational Exceptions Mode page, all devices */
2822                 if (subpcode > 0x0 && subpcode < 0xff)
2823                         goto bad_subpcode;
2824                 len = resp_iec_m_pg(ap, pcontrol, target);
2825                 offset += len;
2826                 break;
2827         case 0x3f:      /* Read all Mode pages */
2828                 if (subpcode > 0x0 && subpcode < 0xff)
2829                         goto bad_subpcode;
2830                 len = resp_err_recov_pg(ap, pcontrol, target);
2831                 len += resp_disconnect_pg(ap + len, pcontrol, target);
2832                 if (is_disk) {
2833                         len += resp_format_pg(ap + len, pcontrol, target);
2834                         len += resp_caching_pg(ap + len, pcontrol, target);
2835                 } else if (is_zbc) {
2836                         len += resp_caching_pg(ap + len, pcontrol, target);
2837                 }
2838                 len += resp_ctrl_m_pg(ap + len, pcontrol, target);
2839                 if (0xff == subpcode)
2840                         len += resp_grouping_m_pg(ap + len, pcontrol, target);
2841                 len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
2842                 if (0xff == subpcode) {
2843                         len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
2844                                                   target_dev_id);
2845                         len += resp_sas_sha_m_spg(ap + len, pcontrol);
2846                 }
2847                 len += resp_iec_m_pg(ap + len, pcontrol, target);
2848                 offset += len;
2849                 break;
2850         default:
2851                 goto bad_pcode;
2852         }
2853         if (msense_6)
2854                 arr[0] = offset - 1;
2855         else
2856                 put_unaligned_be16((offset - 2), arr + 0);
2857         return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
2858
2859 bad_pcode:
2860         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
2861         return check_condition_result;
2862
2863 bad_subpcode:
2864         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
2865         return check_condition_result;
2866 }
2867
#define SDEBUG_MAX_MSELECT_SZ 512

/*
 * Respond to MODE SELECT(6) and MODE SELECT(10) commands. Fetches the
 * parameter list from the initiator, validates the mode parameter header
 * and block descriptor length, then applies the single mode page supplied
 * (Caching 0x8, Control 0xa or Informational Exceptions 0x1c). On success
 * a MODE PARAMETERS CHANGED unit attention is queued for the device.
 * Returns 0 or check_condition_result (sense data already set).
 */
static int resp_mode_select(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
	int param_len, res, mpage;
	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
	unsigned char *cmd = scp->cmnd;
	int mselect6 = (MODE_SELECT == cmd[0]);

	memset(arr, 0, sizeof(arr));
	pf = cmd[1] & 0x10;	/* Page Format bit; must be set */
	sp = cmd[1] & 0x1;	/* Save Pages bit; saving not supported */
	param_len = mselect6 ? cmd[4] : get_unaligned_be16(cmd + 7);
	if ((0 == pf) || sp || (param_len > SDEBUG_MAX_MSELECT_SZ)) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, mselect6 ? 4 : 7, -1);
		return check_condition_result;
	}
	res = fetch_to_dev_buffer(scp, arr, param_len);
	if (-1 == res)
		return DID_ERROR << 16;
	else if (sdebug_verbose && (res < param_len))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: cdb indicated=%d, IO sent=%d bytes\n",
			    __func__, param_len, res);
	/* MODE DATA LENGTH is reserved in MODE SELECT; reject non-zero */
	md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
	bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
	off = bd_len + (mselect6 ? 4 : 8);	/* offset of first mode page */
	if (md_len > 2 || off >= res) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
		return check_condition_result;
	}
	mpage = arr[off] & 0x3f;	/* page code */
	ps = !!(arr[off] & 0x80);	/* PS bit is reserved in MODE SELECT */
	if (ps) {
		mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 7);
		return check_condition_result;
	}
	spf = !!(arr[off] & 0x40);	/* SPF: long (subpage) page format */
	pg_len = spf ? (get_unaligned_be16(arr + off + 2) + 4) :
		       (arr[off + 1] + 2);
	if ((pg_len + off) > param_len) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST,
				PARAMETER_LIST_LENGTH_ERR, 0);
		return check_condition_result;
	}
	switch (mpage) {
	case 0x8:      /* Caching Mode page */
		/* accept only if supplied page length matches ours */
		if (caching_pg[1] == arr[off + 1]) {
			memcpy(caching_pg + 2, arr + off + 2,
			       sizeof(caching_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	case 0xa:      /* Control Mode page */
		if (ctrl_m_pg[1] == arr[off + 1]) {
			memcpy(ctrl_m_pg + 2, arr + off + 2,
			       sizeof(ctrl_m_pg) - 2);
			/* SWP bit toggles the software write protect state */
			if (ctrl_m_pg[4] & 0x8)
				sdebug_wp = true;
			else
				sdebug_wp = false;
			/* D_SENSE bit selects descriptor format sense data */
			sdebug_dsense = !!(ctrl_m_pg[2] & 0x4);
			goto set_mode_changed_ua;
		}
		break;
	case 0x1c:      /* Informational Exceptions Mode page */
		if (iec_m_pg[1] == arr[off + 1]) {
			memcpy(iec_m_pg + 2, arr + off + 2,
			       sizeof(iec_m_pg) - 2);
			goto set_mode_changed_ua;
		}
		break;
	default:
		break;
	}
	mk_sense_invalid_fld(scp, SDEB_IN_DATA, off, 5);
	return check_condition_result;
set_mode_changed_ua:
	set_bit(SDEBUG_UA_MODE_CHANGED, devip->uas_bm);
	return 0;
}
2951
/*
 * Fill in the body of a Temperature log page: two parameters, the
 * current temperature (38 C) and the reference temperature (65 C).
 * Returns the number of bytes written to arr.
 */
static int resp_temp_l_pg(unsigned char *arr)
{
	static const unsigned char temp_l_pg[] = {
		0x0, 0x0, 0x3, 0x2, 0x0, 38,	/* current temperature */
		0x0, 0x1, 0x3, 0x2, 0x0, 65,	/* reference temperature */
	};

	memcpy(arr, temp_l_pg, sizeof(temp_l_pg));
	return sizeof(temp_l_pg);
}
2961
2962 static int resp_ie_l_pg(unsigned char *arr)
2963 {
2964         unsigned char ie_l_pg[] = {0x0, 0x0, 0x3, 0x3, 0x0, 0x0, 38,
2965                 };
2966
2967         memcpy(arr, ie_l_pg, sizeof(ie_l_pg));
2968         if (iec_m_pg[2] & 0x4) {        /* TEST bit set */
2969                 arr[4] = THRESHOLD_EXCEEDED;
2970                 arr[5] = 0xff;
2971         }
2972         return sizeof(ie_l_pg);
2973 }
2974
/*
 * Fill in the body of an Environment Reporting log subpage (0xd/0x1):
 * one temperature report descriptor followed by one relative humidity
 * report descriptor. Returns the number of bytes written to arr.
 */
static int resp_env_rep_l_spg(unsigned char *arr)
{
	static const unsigned char env_rep_l_spg[] = {
		/* temperature report: current 40, lifetime extremes, limits */
		0x0, 0x0, 0x23, 0x8,
		0x0, 40, 72, 0xff, 45, 18, 0, 0,
		/* relative humidity report */
		0x1, 0x0, 0x23, 0x8,
		0x0, 55, 72, 35, 55, 45, 0, 0,
	};

	memcpy(arr, env_rep_l_spg, sizeof(env_rep_l_spg));
	return sizeof(env_rep_l_spg);
}
2986
#define SDEBUG_MAX_LSENSE_SZ 512

/*
 * Respond to the LOG SENSE command. Supports the Supported Log Pages
 * (0x0), Temperature (0xd), Environment Reporting (0xd/0x1) and
 * Informational Exceptions (0x2f) pages, plus the 0xff "all subpages"
 * listings. PPC and SP bits must be zero. Returns 0 or
 * check_condition_result (sense data already set).
 */
static int resp_log_sense(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	int ppc, sp, pcode, subpcode;
	u32 alloc_len, len, n;
	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
	unsigned char *cmd = scp->cmnd;

	memset(arr, 0, sizeof(arr));
	ppc = cmd[1] & 0x2;	/* Parameter Pointer Control: not supported */
	sp = cmd[1] & 0x1;	/* Save Parameters: not supported */
	if (ppc || sp) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, ppc ? 1 : 0);
		return check_condition_result;
	}
	pcode = cmd[2] & 0x3f;
	subpcode = cmd[3] & 0xff;
	alloc_len = get_unaligned_be16(cmd + 7);
	arr[0] = pcode;
	if (0 == subpcode) {
		switch (pcode) {
		case 0x0:	/* Supported log pages log page */
			n = 4;
			arr[n++] = 0x0;		/* this page */
			arr[n++] = 0xd;		/* Temperature */
			arr[n++] = 0x2f;	/* Informational exceptions */
			arr[3] = n - 4;		/* page length */
			break;
		case 0xd:	/* Temperature log page */
			arr[3] = resp_temp_l_pg(arr + 4);
			break;
		case 0x2f:	/* Informational exceptions log page */
			arr[3] = resp_ie_l_pg(arr + 4);
			break;
		default:
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else if (0xff == subpcode) {
		arr[0] |= 0x40;		/* SPF bit: subpage format */
		arr[1] = subpcode;
		switch (pcode) {
		case 0x0:	/* Supported log pages and subpages log page */
			n = 4;
			arr[n++] = 0x0;
			arr[n++] = 0x0;		/* 0,0 page */
			arr[n++] = 0x0;
			arr[n++] = 0xff;	/* this page */
			arr[n++] = 0xd;
			arr[n++] = 0x0;		/* Temperature */
			arr[n++] = 0xd;
			arr[n++] = 0x1;		/* Environment reporting */
			arr[n++] = 0xd;
			arr[n++] = 0xff;	/* all 0xd subpages */
			arr[n++] = 0x2f;
			arr[n++] = 0x0;	/* Informational exceptions */
			arr[n++] = 0x2f;
			arr[n++] = 0xff;	/* all 0x2f subpages */
			arr[3] = n - 4;		/* page length */
			break;
		case 0xd:	/* Temperature subpages */
			n = 4;
			arr[n++] = 0xd;
			arr[n++] = 0x0;		/* Temperature */
			arr[n++] = 0xd;
			arr[n++] = 0x1;		/* Environment reporting */
			arr[n++] = 0xd;
			arr[n++] = 0xff;	/* these subpages */
			arr[3] = n - 4;
			break;
		case 0x2f:	/* Informational exceptions subpages */
			n = 4;
			arr[n++] = 0x2f;
			arr[n++] = 0x0;		/* Informational exceptions */
			arr[n++] = 0x2f;
			arr[n++] = 0xff;	/* these subpages */
			arr[3] = n - 4;
			break;
		default:
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else if (subpcode > 0) {
		/* only Environment Reporting (0xd/0x1) has a real subpage */
		arr[0] |= 0x40;
		arr[1] = subpcode;
		if (pcode == 0xd && subpcode == 1)
			arr[3] = resp_env_rep_l_spg(arr + 4);
		else {
			mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
			return check_condition_result;
		}
	} else {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
		return check_condition_result;
	}
	/* clip response to page length + 4 byte header and ALLOCATION LENGTH */
	len = min_t(u32, get_unaligned_be16(arr + 2) + 4, alloc_len);
	/*
	 * NOTE(review): final clamp uses SDEBUG_MAX_INQ_ARR_SZ rather than
	 * SDEBUG_MAX_LSENSE_SZ (the size of arr) - confirm this is intended.
	 */
	return fill_from_dev_buffer(scp, arr,
		    min_t(u32, len, SDEBUG_MAX_INQ_ARR_SZ));
}
3088
3089 static inline bool sdebug_dev_is_zoned(struct sdebug_dev_info *devip)
3090 {
3091         return devip->nr_zones != 0;
3092 }
3093
/*
 * Return the zone state for the zone containing lba. When the zone
 * capacity equals the zone size (or the zone is conventional) the
 * mapping is a plain shift. Otherwise each sequential zone in zstate[]
 * is followed by a gap zone, so the index past the conventional zones
 * is doubled and then adjusted to select either the sequential zone or
 * its trailing gap zone.
 */
static struct sdeb_zone_state *zbc_zone(struct sdebug_dev_info *devip,
					unsigned long long lba)
{
	u32 zno = lba >> devip->zsize_shift;
	struct sdeb_zone_state *zsp;

	if (devip->zcap == devip->zsize || zno < devip->nr_conv_zones)
		return &devip->zstate[zno];

	/*
	 * If the zone capacity is less than the zone size, adjust for gap
	 * zones.
	 */
	zno = 2 * zno - devip->nr_conv_zones;
	WARN_ONCE(zno >= devip->nr_zones, "%u > %u\n", zno, devip->nr_zones);
	zsp = &devip->zstate[zno];
	/* lba may lie in the gap zone that follows this sequential zone */
	if (lba >= zsp->z_start + zsp->z_size)
		zsp++;
	WARN_ON_ONCE(lba >= zsp->z_start + zsp->z_size);
	return zsp;
}
3115
3116 static inline bool zbc_zone_is_conv(struct sdeb_zone_state *zsp)
3117 {
3118         return zsp->z_type == ZBC_ZTYPE_CNV;
3119 }
3120
3121 static inline bool zbc_zone_is_gap(struct sdeb_zone_state *zsp)
3122 {
3123         return zsp->z_type == ZBC_ZTYPE_GAP;
3124 }
3125
/* True for sequential zones, i.e. anything not conventional or gap. */
static inline bool zbc_zone_is_seq(struct sdeb_zone_state *zsp)
{
	return !(zbc_zone_is_conv(zsp) || zbc_zone_is_gap(zsp));
}
3130
3131 static void zbc_close_zone(struct sdebug_dev_info *devip,
3132                            struct sdeb_zone_state *zsp)
3133 {
3134         enum sdebug_z_cond zc;
3135
3136         if (!zbc_zone_is_seq(zsp))
3137                 return;
3138
3139         zc = zsp->z_cond;
3140         if (!(zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN))
3141                 return;
3142
3143         if (zc == ZC2_IMPLICIT_OPEN)
3144                 devip->nr_imp_open--;
3145         else
3146                 devip->nr_exp_open--;
3147
3148         if (zsp->z_wp == zsp->z_start) {
3149                 zsp->z_cond = ZC1_EMPTY;
3150         } else {
3151                 zsp->z_cond = ZC4_CLOSED;
3152                 devip->nr_closed++;
3153         }
3154 }
3155
3156 static void zbc_close_imp_open_zone(struct sdebug_dev_info *devip)
3157 {
3158         struct sdeb_zone_state *zsp = &devip->zstate[0];
3159         unsigned int i;
3160
3161         for (i = 0; i < devip->nr_zones; i++, zsp++) {
3162                 if (zsp->z_cond == ZC2_IMPLICIT_OPEN) {
3163                         zbc_close_zone(devip, zsp);
3164                         return;
3165                 }
3166         }
3167 }
3168
/*
 * Transition a sequential zone to the explicitly or implicitly open
 * condition, first closing another zone if needed to stay within the
 * device's max_open limit. No-op for conventional/gap zones or when
 * the zone is already open in the requested manner.
 */
static void zbc_open_zone(struct sdebug_dev_info *devip,
			  struct sdeb_zone_state *zsp, bool explicit)
{
	enum sdebug_z_cond zc;

	if (!zbc_zone_is_seq(zsp))
		return;

	zc = zsp->z_cond;
	if ((explicit && zc == ZC3_EXPLICIT_OPEN) ||
	    (!explicit && zc == ZC2_IMPLICIT_OPEN))
		return;

	/* Close an implicit open zone if necessary */
	if (explicit && zsp->z_cond == ZC2_IMPLICIT_OPEN)
		zbc_close_zone(devip, zsp);	/* this zone: implicit -> explicit */
	else if (devip->max_open &&
		 devip->nr_imp_open + devip->nr_exp_open >= devip->max_open)
		zbc_close_imp_open_zone(devip);	/* make room under max_open */

	if (zsp->z_cond == ZC4_CLOSED)
		devip->nr_closed--;
	if (explicit) {
		zsp->z_cond = ZC3_EXPLICIT_OPEN;
		devip->nr_exp_open++;
	} else {
		zsp->z_cond = ZC2_IMPLICIT_OPEN;
		devip->nr_imp_open++;
	}
}
3199
3200 static inline void zbc_set_zone_full(struct sdebug_dev_info *devip,
3201                                      struct sdeb_zone_state *zsp)
3202 {
3203         switch (zsp->z_cond) {
3204         case ZC2_IMPLICIT_OPEN:
3205                 devip->nr_imp_open--;
3206                 break;
3207         case ZC3_EXPLICIT_OPEN:
3208                 devip->nr_exp_open--;
3209                 break;
3210         default:
3211                 WARN_ONCE(true, "Invalid zone %llu condition %x\n",
3212                           zsp->z_start, zsp->z_cond);
3213                 break;
3214         }
3215         zsp->z_cond = ZC5_FULL;
3216 }
3217
/*
 * Advance the write pointer(s) for a write starting at lba for num
 * blocks. A sequential-write-required zone simply advances its WP
 * (such writes cannot span zones); other sequential zones may take a
 * write that spans several zones or lands away from the WP, in which
 * case the zone is flagged as holding a non-sequential resource. A
 * zone whose WP reaches its end becomes FULL.
 */
static void zbc_inc_wp(struct sdebug_dev_info *devip,
		       unsigned long long lba, unsigned int num)
{
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	unsigned long long n, end, zend = zsp->z_start + zsp->z_size;

	if (!zbc_zone_is_seq(zsp))
		return;

	if (zsp->z_type == ZBC_ZTYPE_SWR) {
		zsp->z_wp += num;
		if (zsp->z_wp >= zend)
			zbc_set_zone_full(devip, zsp);
		return;
	}

	while (num) {
		if (lba != zsp->z_wp)
			zsp->z_non_seq_resource = true;

		end = lba + num;
		if (end >= zend) {
			/* write reaches (or passes) the end of this zone */
			n = zend - lba;
			zsp->z_wp = zend;
		} else if (end > zsp->z_wp) {
			/* write extends this zone's write pointer */
			n = num;
			zsp->z_wp = end;
		} else {
			/* write entirely below the WP: WP unchanged */
			n = num;
		}
		if (zsp->z_wp >= zend)
			zbc_set_zone_full(devip, zsp);

		num -= n;
		lba += n;
		if (num) {
			/* remainder continues into the next zone */
			zsp++;
			zend = zsp->z_start + zsp->z_size;
		}
	}
}
3259
/*
 * Validate a read or write against ZBC zone rules: reads must not
 * cross zone-type boundaries; writes must not touch gap zones, must
 * not cross out of a conventional region or a sequential-write-
 * required zone, must start at a SWR zone's write pointer and must not
 * target a FULL zone. A permitted write into an EMPTY or CLOSED zone
 * implicitly opens it. Returns 0 when the access is allowed, else
 * check_condition_result with sense data already set.
 */
static int check_zbc_access_params(struct scsi_cmnd *scp,
			unsigned long long lba, unsigned int num, bool write)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdeb_zone_state *zsp = zbc_zone(devip, lba);
	struct sdeb_zone_state *zsp_end = zbc_zone(devip, lba + num - 1);

	if (!write) {
		/* For host-managed, reads cannot cross zone types boundaries */
		if (zsp->z_type != zsp_end->z_type) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					READ_INVDATA_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	/* Writing into a gap zone is not allowed */
	if (zbc_zone_is_gap(zsp)) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE,
				ATTEMPT_ACCESS_GAP);
		return check_condition_result;
	}

	/* No restrictions for writes within conventional zones */
	if (zbc_zone_is_conv(zsp)) {
		/* ... but they must not spill into a non-conventional zone */
		if (!zbc_zone_is_conv(zsp_end)) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		return 0;
	}

	if (zsp->z_type == ZBC_ZTYPE_SWR) {
		/* Writes cannot cross sequential zone boundaries */
		if (zsp_end != zsp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					WRITE_BOUNDARY_ASCQ);
			return check_condition_result;
		}
		/* Cannot write full zones */
		if (zsp->z_cond == ZC5_FULL) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					INVALID_FIELD_IN_CDB, 0);
			return check_condition_result;
		}
		/* Writes must be aligned to the zone WP */
		if (lba != zsp->z_wp) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST,
					LBA_OUT_OF_RANGE,
					UNALIGNED_WRITE_ASCQ);
			return check_condition_result;
		}
	}

	/* Handle implicit open of closed and empty zones */
	if (zsp->z_cond == ZC1_EMPTY || zsp->z_cond == ZC4_CLOSED) {
		if (devip->max_open &&
		    devip->nr_exp_open >= devip->max_open) {
			mk_sense_buffer(scp, DATA_PROTECT,
					INSUFF_RES_ASC,
					INSUFF_ZONE_ASCQ);
			return check_condition_result;
		}
		zbc_open_zone(devip, zsp, false);
	}

	return 0;
}
3334
3335 static inline int check_device_access_params
3336                         (struct scsi_cmnd *scp, unsigned long long lba,
3337                          unsigned int num, bool write)
3338 {
3339         struct scsi_device *sdp = scp->device;
3340         struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
3341
3342         if (lba + num > sdebug_capacity) {
3343                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
3344                 return check_condition_result;
3345         }
3346         /* transfer length excessive (tie in to block limits VPD page) */
3347         if (num > sdebug_store_sectors) {
3348                 /* needs work to find which cdb byte 'num' comes from */
3349                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
3350                 return check_condition_result;
3351         }
3352         if (write && unlikely(sdebug_wp)) {
3353                 mk_sense_buffer(scp, DATA_PROTECT, WRITE_PROTECTED, 0x2);
3354                 return check_condition_result;
3355         }
3356         if (sdebug_dev_is_zoned(devip))
3357                 return check_zbc_access_params(scp, lba, num, write);
3358
3359         return 0;
3360 }
3361
3362 /*
3363  * Note: if BUG_ON() fires it usually indicates a problem with the parser
3364  * tables. Perhaps a missing F_FAKE_RW or FF_MEDIA_IO flag. Response functions
3365  * that access any of the "stores" in struct sdeb_store_info should call this
3366  * function with bug_if_fake_rw set to true.
3367  */
3368 static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
3369                                                 bool bug_if_fake_rw)
3370 {
3371         if (sdebug_fake_rw) {
3372                 BUG_ON(bug_if_fake_rw); /* See note above */
3373                 return NULL;
3374         }
3375         return xa_load(per_store_ap, devip->sdbg_host->si_idx);
3376 }
3377
/*
 * Copy num sectors at lba between the backing store and the command's
 * scatter-gather list (direction per do_write), starting sg_skip bytes
 * into the sgl. The store may be smaller than the reported capacity,
 * so lba is reduced modulo sdebug_store_sectors and an access running
 * past the end of the store wraps to its start ("rest" sectors).
 * Returns number of bytes copied or -1 if error.
 */
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
			    u32 sg_skip, u64 lba, u32 num, bool do_write)
{
	int ret;
	u64 block, rest = 0;
	enum dma_data_direction dir;
	struct scsi_data_buffer *sdb = &scp->sdb;
	u8 *fsp;

	if (do_write) {
		dir = DMA_TO_DEVICE;
		write_since_sync = true;	/* noted by SYNCHRONIZE CACHE */
	} else {
		dir = DMA_FROM_DEVICE;
	}

	if (!sdb->length || !sip)
		return 0;
	if (scp->sc_data_direction != dir)
		return -1;
	fsp = sip->storep;

	/* block = lba % store sectors (do_div also divides lba in place) */
	block = do_div(lba, sdebug_store_sectors);
	if (block + num > sdebug_store_sectors)
		rest = block + num - sdebug_store_sectors;

	ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
		   fsp + (block * sdebug_sector_size),
		   (num - rest) * sdebug_sector_size, sg_skip, do_write);
	if (ret != (num - rest) * sdebug_sector_size)
		return ret;

	if (rest) {
		/* copy the wrapped tail from/to the start of the store */
		ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents,
			    fsp, rest * sdebug_sector_size,
			    sg_skip + ((num - rest) * sdebug_sector_size),
			    do_write);
	}

	return ret;
}
3420
3421 /* Returns number of bytes copied or -1 if error. */
3422 static int do_dout_fetch(struct scsi_cmnd *scp, u32 num, u8 *doutp)
3423 {
3424         struct scsi_data_buffer *sdb = &scp->sdb;
3425
3426         if (!sdb->length)
3427                 return 0;
3428         if (scp->sc_data_direction != DMA_TO_DEVICE)
3429                 return -1;
3430         return sg_copy_buffer(sdb->table.sgl, sdb->table.nents, doutp,
3431                               num * sdebug_sector_size, 0, true);
3432 }
3433
3434 /* If sip->storep+lba compares equal to arr(num), then copy top half of
3435  * arr into sip->storep+lba and return true. If comparison fails then
3436  * return false. */
3437 static bool comp_write_worker(struct sdeb_store_info *sip, u64 lba, u32 num,
3438                               const u8 *arr, bool compare_only)
3439 {
3440         bool res;
3441         u64 block, rest = 0;
3442         u32 store_blks = sdebug_store_sectors;
3443         u32 lb_size = sdebug_sector_size;
3444         u8 *fsp = sip->storep;
3445
3446         block = do_div(lba, store_blks);
3447         if (block + num > store_blks)
3448                 rest = block + num - store_blks;
3449
3450         res = !memcmp(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3451         if (!res)
3452                 return res;
3453         if (rest)
3454                 res = memcmp(fsp, arr + ((num - rest) * lb_size),
3455                              rest * lb_size);
3456         if (!res)
3457                 return res;
3458         if (compare_only)
3459                 return true;
3460         arr += num * lb_size;
3461         memcpy(fsp + (block * lb_size), arr, (num - rest) * lb_size);
3462         if (rest)
3463                 memcpy(fsp, arr + ((num - rest) * lb_size), rest * lb_size);
3464         return res;
3465 }
3466
3467 static __be16 dif_compute_csum(const void *buf, int len)
3468 {
3469         __be16 csum;
3470
3471         if (sdebug_guard)
3472                 csum = (__force __be16)ip_compute_csum(buf, len);
3473         else
3474                 csum = cpu_to_be16(crc_t10dif(buf, len));
3475
3476         return csum;
3477 }
3478
/*
 * Verify one sector's protection information tuple against the sector
 * data. Returns 0 on success, 0x01 on a guard tag mismatch, or 0x03 on
 * a reference tag mismatch (type 1 protection checks the low 32 bits
 * of the sector number, type 2 checks the expected initial LBA ei_lba).
 */
static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
		      sector_t sector, u32 ei_lba)
{
	__be16 csum = dif_compute_csum(data, sdebug_sector_size);

	if (sdt->guard_tag != csum) {
		pr_err("GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n",
			(unsigned long)sector,
			be16_to_cpu(sdt->guard_tag),
			be16_to_cpu(csum));
		return 0x01;
	}
	if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    be32_to_cpu(sdt->ref_tag) != ei_lba) {
		pr_err("REF check failed on sector %lu\n",
			(unsigned long)sector);
		return 0x03;
	}
	return 0;
}
3505
/*
 * Copy protection information for "sectors" sectors starting at
 * "sector" between the store's PI array (dif_storep) and the command's
 * protection scatter-gather list; direction selected by "read" (true:
 * store -> sgl). Handles wrap-around at the end of dif_storep since
 * the store may be smaller than the reported capacity.
 */
static void dif_copy_prot(struct scsi_cmnd *scp, sector_t sector,
			  unsigned int sectors, bool read)
{
	size_t resid;
	void *paddr;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *dif_storep = sip->dif_storep;
	const void *dif_store_end = dif_storep + sdebug_store_sectors;
	struct sg_mapping_iter miter;

	/* Bytes of protection data to copy into sgl */
	resid = sectors * sizeof(*dif_storep);

	sg_miter_start(&miter, scsi_prot_sglist(scp),
		       scsi_prot_sg_count(scp), SG_MITER_ATOMIC |
		       (read ? SG_MITER_TO_SG : SG_MITER_FROM_SG));

	while (sg_miter_next(&miter) && resid > 0) {
		size_t len = min_t(size_t, miter.length, resid);
		void *start = dif_store(sip, sector);
		size_t rest = 0;	/* bytes wrapping past store end */

		if (dif_store_end < start + len)
			rest = start + len - dif_store_end;

		paddr = miter.addr;

		if (read)
			memcpy(paddr, start, len - rest);
		else
			memcpy(start, paddr, len - rest);

		if (rest) {
			/* wrapped portion continues at start of store */
			if (read)
				memcpy(paddr + len - rest, dif_storep, rest);
			else
				memcpy(dif_storep, paddr + len - rest, rest);
		}

		sector += len / sizeof(*dif_storep);
		resid -= len;
	}
	sg_miter_stop(&miter);
}
3551
/*
 * Verify the protection information for a READ of "sectors" sectors
 * starting at start_sec, then copy the PI to the command's protection
 * sgl. Tuples whose app tag is 0xffff are skipped (PI escape value).
 * Returns 0 or a dif_verify() error code (0x01 guard, 0x03 ref tag).
 */
static int prot_verify_read(struct scsi_cmnd *scp, sector_t start_sec,
			    unsigned int sectors, u32 ei_lba)
{
	int ret = 0;
	unsigned int i;
	sector_t sector;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	struct t10_pi_tuple *sdt;

	for (i = 0; i < sectors; i++, ei_lba++) {
		sector = start_sec + i;
		sdt = dif_store(sip, sector);

		if (sdt->app_tag == cpu_to_be16(0xffff))
			continue;

		/*
		 * Because scsi_debug acts as both initiator and
		 * target we proceed to verify the PI even if
		 * RDPROTECT=3. This is done so the "initiator" knows
		 * which type of error to return. Otherwise we would
		 * have to iterate over the PI twice.
		 */
		if (scp->cmnd[1] >> 5) { /* RDPROTECT */
			ret = dif_verify(sdt, lba2fake_store(sip, sector),
					 sector, ei_lba);
			if (ret) {
				dif_errors++;
				break;
			}
		}
	}

	/* always hand the PI to the initiator, even after a failure */
	dif_copy_prot(scp, start_sec, sectors, true);
	dix_reads++;

	return ret;
}
3591
3592 static inline void
3593 sdeb_read_lock(struct sdeb_store_info *sip)
3594 {
3595         if (sdebug_no_rwlock) {
3596                 if (sip)
3597                         __acquire(&sip->macc_lck);
3598                 else
3599                         __acquire(&sdeb_fake_rw_lck);
3600         } else {
3601                 if (sip)
3602                         read_lock(&sip->macc_lck);
3603                 else
3604                         read_lock(&sdeb_fake_rw_lck);
3605         }
3606 }
3607
3608 static inline void
3609 sdeb_read_unlock(struct sdeb_store_info *sip)
3610 {
3611         if (sdebug_no_rwlock) {
3612                 if (sip)
3613                         __release(&sip->macc_lck);
3614                 else
3615                         __release(&sdeb_fake_rw_lck);
3616         } else {
3617                 if (sip)
3618                         read_unlock(&sip->macc_lck);
3619                 else
3620                         read_unlock(&sdeb_fake_rw_lck);
3621         }
3622 }
3623
3624 static inline void
3625 sdeb_write_lock(struct sdeb_store_info *sip)
3626 {
3627         if (sdebug_no_rwlock) {
3628                 if (sip)
3629                         __acquire(&sip->macc_lck);
3630                 else
3631                         __acquire(&sdeb_fake_rw_lck);
3632         } else {
3633                 if (sip)
3634                         write_lock(&sip->macc_lck);
3635                 else
3636                         write_lock(&sdeb_fake_rw_lck);
3637         }
3638 }
3639
3640 static inline void
3641 sdeb_write_unlock(struct sdeb_store_info *sip)
3642 {
3643         if (sdebug_no_rwlock) {
3644                 if (sip)
3645                         __release(&sip->macc_lck);
3646                 else
3647                         __release(&sdeb_fake_rw_lck);
3648         } else {
3649                 if (sip)
3650                         write_unlock(&sip->macc_lck);
3651                 else
3652                         write_unlock(&sdeb_fake_rw_lck);
3653         }
3654 }
3655
/*
 * Respond to READ(6)/(10)/(12)/(16)/(32) and the read half of
 * XDWRITEREAD(10): decode the CDB, apply any pending error injection,
 * verify DIF/DIX protection information, and transfer data via
 * do_device_access().
 */
static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;	/* number of logical blocks to read */
	u32 ei_lba;	/* expected initial LBA for PI checks; READ(32) only */
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 *cmd = scp->cmnd;

	/* Decode LBA, transfer length and protection expectations per opcode */
	switch (cmd[0]) {
	case READ_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case READ_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case READ_6:
		ei_lba = 0;
		/* READ(6) packs a 21-bit LBA into CDB bytes 1..3 */
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];	/* 0 means 256 blocks */
		check_prot = true;
		break;
	case READ_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case XDWRITEREAD_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume READ(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* Type 2 protection rejects a non-zero RDPROTECT field here */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
				    "to DIF device\n");
	}
	/* Injection: pretend only half the requested blocks transferred */
	if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
		     atomic_read(&sdeb_inject_pending))) {
		num /= 2;
		atomic_set(&sdeb_inject_pending, 0);
	}

	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	/* Injection: fake an unrecoverable medium error inside the range */
	if (unlikely((SDEBUG_OPT_MEDIUM_ERR & sdebug_opts) &&
		     (lba <= (sdebug_medium_error_start + sdebug_medium_error_count - 1)) &&
		     ((lba + num) > sdebug_medium_error_start))) {
		/* claim unrecoverable read error */
		mk_sense_buffer(scp, MEDIUM_ERROR, UNRECOVERED_READ_ERR, 0);
		/* set info field and valid bit for fixed descriptor */
		if (0x70 == (scp->sense_buffer[0] & 0x7f)) {
			scp->sense_buffer[0] |= 0x80;	/* Valid bit */
			ret = (lba < OPT_MEDIUM_ERR_ADDR)
			      ? OPT_MEDIUM_ERR_ADDR : (int)lba;
			put_unaligned_be32(ret, scp->sense_buffer + 3);
		}
		scsi_set_resid(scp, scsi_bufflen(scp));
		return check_condition_result;
	}

	sdeb_read_lock(sip);

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		switch (prot_verify_read(scp, lba, num, ei_lba)) {
		case 1: /* Guard tag error */
			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return check_condition_result;
			} else if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			}
			break;
		case 3: /* Reference tag error */
			if (cmd[1] >> 5 != 3) { /* RDPROTECT != 3 */
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
				return check_condition_result;
			} else if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
				sdeb_read_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
				return illegal_condition_result;
			}
			break;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, false);
	sdeb_read_unlock(sip);
	if (unlikely(ret == -1))
		return DID_ERROR << 16;

	/* ret is the number of bytes actually transferred */
	scsi_set_resid(scp, scsi_bufflen(scp) - ret);

	/* Injection: report a DIF/DIX error even though the read succeeded */
	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		} else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		}
	}
	return 0;
}
3798
/*
 * Verify the protection information accompanying a write before it is
 * committed to the store.  Walks the protection scatterlist and the data
 * scatterlist in lockstep, one t10_pi_tuple per logical block.
 *
 * Returns 0 on success; otherwise the non-zero dif_verify() result
 * (callers map 1 to a guard tag error and 3 to a reference tag error),
 * or 0x01 if the data scatterlist is exhausted prematurely.
 */
static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
			     unsigned int sectors, u32 ei_lba)
{
	int ret;
	struct t10_pi_tuple *sdt;
	void *daddr;
	sector_t sector = start_sec;
	int ppage_offset;
	int dpage_offset;
	struct sg_mapping_iter diter;	/* data scatterlist iterator */
	struct sg_mapping_iter piter;	/* protection scatterlist iterator */

	BUG_ON(scsi_sg_count(SCpnt) == 0);
	BUG_ON(scsi_prot_sg_count(SCpnt) == 0);

	sg_miter_start(&piter, scsi_prot_sglist(SCpnt),
			scsi_prot_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt),
			SG_MITER_ATOMIC | SG_MITER_FROM_SG);

	/* For each protection page */
	while (sg_miter_next(&piter)) {
		dpage_offset = 0;
		if (WARN_ON(!sg_miter_next(&diter))) {
			ret = 0x01;
			goto out;
		}

		/* One t10_pi_tuple per logical block in this prot page */
		for (ppage_offset = 0; ppage_offset < piter.length;
		     ppage_offset += sizeof(struct t10_pi_tuple)) {
			/* If we're at the end of the current
			 * data page advance to the next one
			 */
			if (dpage_offset >= diter.length) {
				if (WARN_ON(!sg_miter_next(&diter))) {
					ret = 0x01;
					goto out;
				}
				dpage_offset = 0;
			}

			sdt = piter.addr + ppage_offset;
			daddr = diter.addr + dpage_offset;

			/* WRPROTECT == 3 means: write without checking PI */
			if (SCpnt->cmnd[1] >> 5 != 3) { /* WRPROTECT */
				ret = dif_verify(sdt, daddr, sector, ei_lba);
				if (ret)
					goto out;
			}

			sector++;
			ei_lba++;
			dpage_offset += sdebug_sector_size;
		}
		diter.consumed = dpage_offset;
		sg_miter_stop(&diter);
	}
	sg_miter_stop(&piter);

	/* PI verified (or checks skipped): retain it for later reads */
	dif_copy_prot(SCpnt, start_sec, sectors, false);
	dix_writes++;

	return 0;

out:
	dif_errors++;
	sg_miter_stop(&diter);
	sg_miter_stop(&piter);
	return ret;
}
3870
3871 static unsigned long lba_to_map_index(sector_t lba)
3872 {
3873         if (sdebug_unmap_alignment)
3874                 lba += sdebug_unmap_granularity - sdebug_unmap_alignment;
3875         sector_div(lba, sdebug_unmap_granularity);
3876         return lba;
3877 }
3878
3879 static sector_t map_index_to_lba(unsigned long index)
3880 {
3881         sector_t lba = index * sdebug_unmap_granularity;
3882
3883         if (sdebug_unmap_alignment)
3884                 lba -= sdebug_unmap_granularity - sdebug_unmap_alignment;
3885         return lba;
3886 }
3887
3888 static unsigned int map_state(struct sdeb_store_info *sip, sector_t lba,
3889                               unsigned int *num)
3890 {
3891         sector_t end;
3892         unsigned int mapped;
3893         unsigned long index;
3894         unsigned long next;
3895
3896         index = lba_to_map_index(lba);
3897         mapped = test_bit(index, sip->map_storep);
3898
3899         if (mapped)
3900                 next = find_next_zero_bit(sip->map_storep, map_size, index);
3901         else
3902                 next = find_next_bit(sip->map_storep, map_size, index);
3903
3904         end = min_t(sector_t, sdebug_store_sectors,  map_index_to_lba(next));
3905         *num = end - lba;
3906         return mapped;
3907 }
3908
3909 static void map_region(struct sdeb_store_info *sip, sector_t lba,
3910                        unsigned int len)
3911 {
3912         sector_t end = lba + len;
3913
3914         while (lba < end) {
3915                 unsigned long index = lba_to_map_index(lba);
3916
3917                 if (index < map_size)
3918                         set_bit(index, sip->map_storep);
3919
3920                 lba = map_index_to_lba(index + 1);
3921         }
3922 }
3923
/*
 * Clear the provisioning-map bits for [lba, lba+len).  Only granules
 * that the range covers completely are unmapped; partially covered
 * granules at either end are left mapped.
 */
static void unmap_region(struct sdeb_store_info *sip, sector_t lba,
			 unsigned int len)
{
	sector_t end = lba + len;
	u8 *fsp = sip->storep;

	while (lba < end) {
		unsigned long index = lba_to_map_index(lba);

		/* Unmap only if the whole granule lies inside the range */
		if (lba == map_index_to_lba(index) &&
		    lba + sdebug_unmap_granularity <= end &&
		    index < map_size) {
			clear_bit(index, sip->map_storep);
			if (sdebug_lbprz) {  /* for LBPRZ=2 return 0xff_s */
				memset(fsp + lba * sdebug_sector_size,
				       (sdebug_lbprz & 1) ? 0 : 0xff,
				       sdebug_sector_size *
				       sdebug_unmap_granularity);
			}
			/* Invalidate any stored protection info as well */
			if (sip->dif_storep) {
				memset(sip->dif_storep + lba, 0xff,
				       sizeof(*sip->dif_storep) *
				       sdebug_unmap_granularity);
			}
		}
		lba = map_index_to_lba(index + 1);
	}
}
3952
/*
 * Respond to WRITE(6)/(10)/(12)/(16)/(32) and the write half of
 * XDWRITEREAD(10): decode the CDB, verify DIF/DIX protection
 * information, transfer the data into the store and update the
 * provisioning map and any ZBC write pointer.
 */
static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool check_prot;
	u32 num;	/* number of logical blocks to write */
	u32 ei_lba;	/* expected initial LBA for PI checks; WRITE(32) only */
	int ret;
	u64 lba;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 *cmd = scp->cmnd;

	/* Decode LBA, transfer length and protection expectations per opcode */
	switch (cmd[0]) {
	case WRITE_16:
		ei_lba = 0;
		lba = get_unaligned_be64(cmd + 2);
		num = get_unaligned_be32(cmd + 10);
		check_prot = true;
		break;
	case WRITE_10:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = true;
		break;
	case WRITE_6:
		ei_lba = 0;
		/* WRITE(6) packs a 21-bit LBA into CDB bytes 1..3 */
		lba = (u32)cmd[3] | (u32)cmd[2] << 8 |
		      (u32)(cmd[1] & 0x1f) << 16;
		num = (0 == cmd[4]) ? 256 : cmd[4];	/* 0 means 256 blocks */
		check_prot = true;
		break;
	case WRITE_12:
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be32(cmd + 6);
		check_prot = true;
		break;
	case 0x53:	/* XDWRITEREAD(10); NOTE(review): resp_read_dt0 uses
			 * the XDWRITEREAD_10 macro for the same opcode */
		ei_lba = 0;
		lba = get_unaligned_be32(cmd + 2);
		num = get_unaligned_be16(cmd + 7);
		check_prot = false;
		break;
	default:	/* assume WRITE(32) */
		lba = get_unaligned_be64(cmd + 12);
		ei_lba = get_unaligned_be32(cmd + 20);
		num = get_unaligned_be32(cmd + 28);
		check_prot = false;
		break;
	}
	if (unlikely(have_dif_prot && check_prot)) {
		/* Type 2 protection rejects a non-zero WRPROTECT field here */
		if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
		    (cmd[1] & 0xe0)) {
			mk_sense_invalid_opcode(scp);
			return check_condition_result;
		}
		if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
		     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
		    (cmd[1] & 0xe0) == 0)
			sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
				    "to DIF device\n");
	}

	sdeb_write_lock(sip);
	ret = check_device_access_params(scp, lba, num, true);
	if (ret) {
		sdeb_write_unlock(sip);
		return ret;
	}

	/* DIX + T10 DIF */
	if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
		switch (prot_verify_write(scp, lba, num, ei_lba)) {
		case 1: /* Guard tag error */
			if (scp->prot_flags & SCSI_PROT_GUARD_CHECK) {
				sdeb_write_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				return illegal_condition_result;
			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
				sdeb_write_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				return check_condition_result;
			}
			break;
		case 3: /* Reference tag error */
			if (scp->prot_flags & SCSI_PROT_REF_CHECK) {
				sdeb_write_unlock(sip);
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 3);
				return illegal_condition_result;
			} else if (scp->cmnd[1] >> 5 != 3) { /* WRPROTECT != 3 */
				sdeb_write_unlock(sip);
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 3);
				return check_condition_result;
			}
			break;
		}
	}

	ret = do_device_access(sip, scp, 0, lba, num, true);
	if (unlikely(scsi_debug_lbp()))
		map_region(sip, lba, num);
	/* If ZBC zone then bump its write pointer */
	if (sdebug_dev_is_zoned(devip))
		zbc_inc_wp(devip, lba, num);
	sdeb_write_unlock(sip);
	if (unlikely(-1 == ret))
		return DID_ERROR << 16;
	else if (unlikely(sdebug_verbose &&
			  (ret < (num * sdebug_sector_size))))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num * sdebug_sector_size, ret);

	/* Injection: report a DIF/DIX error even though the write succeeded */
	if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
		     atomic_read(&sdeb_inject_pending))) {
		if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
			mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
			atomic_set(&sdeb_inject_pending, 0);
			return check_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
			/* Logical block guard check failed */
			mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
			mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
			atomic_set(&sdeb_inject_pending, 0);
			return illegal_condition_result;
		}
	}
	return 0;
}
4084
4085 /*
4086  * T10 has only specified WRITE SCATTERED(16) and WRITE SCATTERED(32).
4087  * No READ GATHERED yet (requires bidi or long cdb holding gather list).
4088  */
static int resp_write_scat(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *lrdp = NULL;	/* parameter list header + LBA range descriptors */
	u8 *up;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 wrprotect;
	u16 lbdof, num_lrd, k;
	u32 num, num_by, bt_len, lbdof_blen, sg_off, cum_lb;
	u32 lb_size = sdebug_sector_size;
	u32 ei_lba;
	u64 lba;
	int ret, res;
	bool is_16;
	static const u32 lrd_size = 32; /* + parameter list header size */

	/* Decode the CDB: WRITE SCATTERED(32) or WRITE SCATTERED(16) */
	if (cmd[0] == VARIABLE_LENGTH_CMD) {
		is_16 = false;
		wrprotect = (cmd[10] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 12);
		num_lrd = get_unaligned_be16(cmd + 16);
		bt_len = get_unaligned_be32(cmd + 28);
	} else {	/* that leaves WRITE SCATTERED(16) */
		is_16 = true;
		wrprotect = (cmd[2] >> 5) & 0x7;
		lbdof = get_unaligned_be16(cmd + 4);
		num_lrd = get_unaligned_be16(cmd + 8);
		bt_len = get_unaligned_be32(cmd + 10);
		if (unlikely(have_dif_prot)) {
			if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
			    wrprotect) {
				mk_sense_invalid_opcode(scp);
				return illegal_condition_result;
			}
			if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
			     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
			     wrprotect == 0)
				sdev_printk(KERN_ERR, scp->device,
					    "Unprotected WR to DIF device\n");
		}
	}
	if ((num_lrd == 0) || (bt_len == 0))
		return 0;       /* T10 says these do-nothings are not errors */
	if (lbdof == 0) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LB Data Offset field bad\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	/* The descriptor area occupies the first lbdof logical blocks */
	lbdof_blen = lbdof * lb_size;
	if ((lrd_size + (num_lrd * lrd_size)) > lbdof_blen) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: LBA range descriptors don't fit\n",
				my_name, __func__);
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
		return illegal_condition_result;
	}
	lrdp = kzalloc(lbdof_blen, GFP_ATOMIC | __GFP_NOWARN);
	if (lrdp == NULL)
		return SCSI_MLQUEUE_HOST_BUSY;	/* retry later, no memory now */
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, scp->device,
			"%s: %s: Fetch header+scatter_list, lbdof_blen=%u\n",
			my_name, __func__, lbdof_blen);
	res = fetch_to_dev_buffer(scp, lrdp, lbdof_blen);
	if (res == -1) {
		ret = DID_ERROR << 16;
		goto err_out;
	}

	sdeb_write_lock(sip);
	sg_off = lbdof_blen;	/* data for descriptor 0 starts here */
	/* Spec says Buffer xfer Length field in number of LBs in dout */
	cum_lb = 0;
	/* Process each LBA range descriptor; the first one follows the header */
	for (k = 0, up = lrdp + lrd_size; k < num_lrd; ++k, up += lrd_size) {
		lba = get_unaligned_be64(up + 0);
		num = get_unaligned_be32(up + 8);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, scp->device,
				"%s: %s: k=%d  LBA=0x%llx num=%u  sg_off=%u\n",
				my_name, __func__, k, lba, num, sg_off);
		if (num == 0)
			continue;
		ret = check_device_access_params(scp, lba, num, true);
		if (ret)
			goto err_out_unlock;
		num_by = num * lb_size;
		ei_lba = is_16 ? 0 : get_unaligned_be32(up + 12);

		if ((cum_lb + num) > bt_len) {
			if (sdebug_verbose)
				sdev_printk(KERN_INFO, scp->device,
				    "%s: %s: sum of blocks > data provided\n",
				    my_name, __func__);
			mk_sense_buffer(scp, ILLEGAL_REQUEST, WRITE_ERROR_ASC,
					0);
			ret = illegal_condition_result;
			goto err_out_unlock;
		}

		/* DIX + T10 DIF */
		if (unlikely(sdebug_dix && scsi_prot_sg_count(scp))) {
			int prot_ret = prot_verify_write(scp, lba, num,
							 ei_lba);

			if (prot_ret) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10,
						prot_ret);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}

		ret = do_device_access(sip, scp, sg_off, lba, num, true);
		/* If ZBC zone then bump its write pointer */
		if (sdebug_dev_is_zoned(devip))
			zbc_inc_wp(devip, lba, num);
		if (unlikely(scsi_debug_lbp()))
			map_region(sip, lba, num);
		if (unlikely(-1 == ret)) {
			ret = DID_ERROR << 16;
			goto err_out_unlock;
		} else if (unlikely(sdebug_verbose && (ret < num_by)))
			sdev_printk(KERN_INFO, scp->device,
			    "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, num_by, ret);

		/* Injection: report a DIF/DIX error for this descriptor */
		if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
			     atomic_read(&sdeb_inject_pending))) {
			if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
				mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
				atomic_set(&sdeb_inject_pending, 0);
				ret = check_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
				/* Logical block guard check failed */
				mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			} else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
				mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
				atomic_set(&sdeb_inject_pending, 0);
				ret = illegal_condition_result;
				goto err_out_unlock;
			}
		}
		sg_off += num_by;	/* advance to next descriptor's data */
		cum_lb += num;
	}
	ret = 0;
err_out_unlock:
	sdeb_write_unlock(sip);
err_out:
	kfree(lrdp);
	return ret;
}
4250
/*
 * Respond to WRITE SAME(10/16): write one logical block of data (or
 * zeroes when NDOB is set) to every block in [lba, lba+num), or unmap the
 * range when the UNMAP bit is set and logical block provisioning is
 * enabled. Returns 0 on success, a check condition result, or
 * DID_ERROR << 16 on data-out transfer failure.
 */
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
			   u32 ei_lba, bool unmap, bool ndob)
{
	struct scsi_device *sdp = scp->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	unsigned long long i;
	u64 block, lbaa;
	u32 lb_size = sdebug_sector_size;
	int ret;
	struct sdeb_store_info *sip = devip2sip((struct sdebug_dev_info *)
						scp->device->hostdata, true);
	u8 *fs1p;
	u8 *fsp;

	sdeb_write_lock(sip);

	ret = check_device_access_params(scp, lba, num, true);
	if (ret) {
		sdeb_write_unlock(sip);
		return ret;
	}

	if (unmap && scsi_debug_lbp()) {
		unmap_region(sip, lba, num);
		goto out;
	}
	/* do_div() divides lbaa in place and returns the remainder */
	lbaa = lba;
	block = do_div(lbaa, sdebug_store_sectors);
	/* if ndob then zero 1 logical block, else fetch 1 logical block */
	fsp = sip->storep;
	fs1p = fsp + (block * lb_size);
	if (ndob) {
		memset(fs1p, 0, lb_size);
		ret = 0;
	} else
		ret = fetch_to_dev_buffer(scp, fs1p, lb_size);

	if (-1 == ret) {
		/* transfer failed; store may hold a partial first block */
		sdeb_write_unlock(sip);
		return DID_ERROR << 16;
	} else if (sdebug_verbose && !ndob && (ret < lb_size))
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: lb size=%u, IO sent=%d bytes\n",
			    my_name, "write same", lb_size, ret);

	/* Copy first sector to remaining blocks */
	for (i = 1 ; i < num ; i++) {
		lbaa = lba + i;
		block = do_div(lbaa, sdebug_store_sectors);
		memmove(fsp + (block * lb_size), fs1p, lb_size);
	}
	if (scsi_debug_lbp())
		map_region(sip, lba, num);
	/* If ZBC zone then bump its write pointer */
	if (sdebug_dev_is_zoned(devip))
		zbc_inc_wp(devip, lba, num);
out:
	sdeb_write_unlock(sip);

	return 0;
}
4312
4313 static int resp_write_same_10(struct scsi_cmnd *scp,
4314                               struct sdebug_dev_info *devip)
4315 {
4316         u8 *cmd = scp->cmnd;
4317         u32 lba;
4318         u16 num;
4319         u32 ei_lba = 0;
4320         bool unmap = false;
4321
4322         if (cmd[1] & 0x8) {
4323                 if (sdebug_lbpws10 == 0) {
4324                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
4325                         return check_condition_result;
4326                 } else
4327                         unmap = true;
4328         }
4329         lba = get_unaligned_be32(cmd + 2);
4330         num = get_unaligned_be16(cmd + 7);
4331         if (num > sdebug_write_same_length) {
4332                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4333                 return check_condition_result;
4334         }
4335         return resp_write_same(scp, lba, num, ei_lba, unmap, false);
4336 }
4337
4338 static int resp_write_same_16(struct scsi_cmnd *scp,
4339                               struct sdebug_dev_info *devip)
4340 {
4341         u8 *cmd = scp->cmnd;
4342         u64 lba;
4343         u32 num;
4344         u32 ei_lba = 0;
4345         bool unmap = false;
4346         bool ndob = false;
4347
4348         if (cmd[1] & 0x8) {     /* UNMAP */
4349                 if (sdebug_lbpws == 0) {
4350                         mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 3);
4351                         return check_condition_result;
4352                 } else
4353                         unmap = true;
4354         }
4355         if (cmd[1] & 0x1)  /* NDOB (no data-out buffer, assumes zeroes) */
4356                 ndob = true;
4357         lba = get_unaligned_be64(cmd + 2);
4358         num = get_unaligned_be32(cmd + 10);
4359         if (num > sdebug_write_same_length) {
4360                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
4361                 return check_condition_result;
4362         }
4363         return resp_write_same(scp, lba, num, ei_lba, unmap, ndob);
4364 }
4365
4366 /* Note the mode field is in the same position as the (lower) service action
4367  * field. For the Report supported operation codes command, SPC-4 suggests
4368  * each mode of this command should be reported separately; for future. */
4369 static int resp_write_buffer(struct scsi_cmnd *scp,
4370                              struct sdebug_dev_info *devip)
4371 {
4372         u8 *cmd = scp->cmnd;
4373         struct scsi_device *sdp = scp->device;
4374         struct sdebug_dev_info *dp;
4375         u8 mode;
4376
4377         mode = cmd[1] & 0x1f;
4378         switch (mode) {
4379         case 0x4:       /* download microcode (MC) and activate (ACT) */
4380                 /* set UAs on this device only */
4381                 set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
4382                 set_bit(SDEBUG_UA_MICROCODE_CHANGED, devip->uas_bm);
4383                 break;
4384         case 0x5:       /* download MC, save and ACT */
4385                 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET, devip->uas_bm);
4386                 break;
4387         case 0x6:       /* download MC with offsets and ACT */
4388                 /* set UAs on most devices (LUs) in this target */
4389                 list_for_each_entry(dp,
4390                                     &devip->sdbg_host->dev_info_list,
4391                                     dev_list)
4392                         if (dp->target == sdp->id) {
4393                                 set_bit(SDEBUG_UA_BUS_RESET, dp->uas_bm);
4394                                 if (devip != dp)
4395                                         set_bit(SDEBUG_UA_MICROCODE_CHANGED,
4396                                                 dp->uas_bm);
4397                         }
4398                 break;
4399         case 0x7:       /* download MC with offsets, save, and ACT */
4400                 /* set UA on all devices (LUs) in this target */
4401                 list_for_each_entry(dp,
4402                                     &devip->sdbg_host->dev_info_list,
4403                                     dev_list)
4404                         if (dp->target == sdp->id)
4405                                 set_bit(SDEBUG_UA_MICROCODE_CHANGED_WO_RESET,
4406                                         dp->uas_bm);
4407                 break;
4408         default:
4409                 /* do nothing for this command for other mode values */
4410                 break;
4411         }
4412         return 0;
4413 }
4414
/*
 * Respond to COMPARE AND WRITE: fetch 2*num blocks of data-out (the
 * verify data followed by the write data), compare against the store and
 * produce MISCOMPARE sense when the comparison fails. Returns 0, a check
 * condition result, or DID_ERROR << 16 on data-out transfer failure.
 */
static int resp_comp_write(struct scsi_cmnd *scp,
			   struct sdebug_dev_info *devip)
{
	u8 *cmd = scp->cmnd;
	u8 *arr;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u64 lba;
	u32 dnum;
	u32 lb_size = sdebug_sector_size;
	u8 num;
	int ret;
	int retval = 0;

	lba = get_unaligned_be64(cmd + 2);
	num = cmd[13];		/* 1 to a maximum of 255 logical blocks */
	if (0 == num)
		return 0;	/* degenerate case, not an error */
	if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
	    (cmd[1] & 0xe0)) {
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
	     sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
	    (cmd[1] & 0xe0) == 0)
		sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
			    "to DIF device\n");
	ret = check_device_access_params(scp, lba, num, false);
	if (ret)
		return ret;
	/* one buffer holds both halves: verify data then write data */
	dnum = 2 * num;
	arr = kcalloc(lb_size, dnum, GFP_ATOMIC);
	if (NULL == arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}

	/* write lock: compare and any subsequent write must be atomic */
	sdeb_write_lock(sip);

	ret = do_dout_fetch(scp, dnum, arr);
	if (ret == -1) {
		retval = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (dnum * lb_size)))
		sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
			    "indicated=%u, IO sent=%d bytes\n", my_name,
			    dnum * lb_size, ret);
	/* a mismatch anywhere in the range yields MISCOMPARE sense */
	if (!comp_write_worker(sip, lba, num, arr, false)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		retval = check_condition_result;
		goto cleanup;
	}
	if (scsi_debug_lbp())
		map_region(sip, lba, num);
cleanup:
	sdeb_write_unlock(sip);
	kfree(arr);
	return retval;
}
4475
/* Wire format of one UNMAP parameter-list block descriptor (16 bytes). */
struct unmap_block_desc {
	__be64	lba;		/* first LBA to deallocate */
	__be32	blocks;		/* number of logical blocks */
	__be32	__reserved;
};
4481
4482 static int resp_unmap(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
4483 {
4484         unsigned char *buf;
4485         struct unmap_block_desc *desc;
4486         struct sdeb_store_info *sip = devip2sip(devip, true);
4487         unsigned int i, payload_len, descriptors;
4488         int ret;
4489
4490         if (!scsi_debug_lbp())
4491                 return 0;       /* fib and say its done */
4492         payload_len = get_unaligned_be16(scp->cmnd + 7);
4493         BUG_ON(scsi_bufflen(scp) != payload_len);
4494
4495         descriptors = (payload_len - 8) / 16;
4496         if (descriptors > sdebug_unmap_max_desc) {
4497                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 7, -1);
4498                 return check_condition_result;
4499         }
4500
4501         buf = kzalloc(scsi_bufflen(scp), GFP_ATOMIC);
4502         if (!buf) {
4503                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4504                                 INSUFF_RES_ASCQ);
4505                 return check_condition_result;
4506         }
4507
4508         scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));
4509
4510         BUG_ON(get_unaligned_be16(&buf[0]) != payload_len - 2);
4511         BUG_ON(get_unaligned_be16(&buf[2]) != descriptors * 16);
4512
4513         desc = (void *)&buf[8];
4514
4515         sdeb_write_lock(sip);
4516
4517         for (i = 0 ; i < descriptors ; i++) {
4518                 unsigned long long lba = get_unaligned_be64(&desc[i].lba);
4519                 unsigned int num = get_unaligned_be32(&desc[i].blocks);
4520
4521                 ret = check_device_access_params(scp, lba, num, true);
4522                 if (ret)
4523                         goto out;
4524
4525                 unmap_region(sip, lba, num);
4526         }
4527
4528         ret = 0;
4529
4530 out:
4531         sdeb_write_unlock(sip);
4532         kfree(buf);
4533
4534         return ret;
4535 }
4536
4537 #define SDEBUG_GET_LBA_STATUS_LEN 32
4538
4539 static int resp_get_lba_status(struct scsi_cmnd *scp,
4540                                struct sdebug_dev_info *devip)
4541 {
4542         u8 *cmd = scp->cmnd;
4543         u64 lba;
4544         u32 alloc_len, mapped, num;
4545         int ret;
4546         u8 arr[SDEBUG_GET_LBA_STATUS_LEN];
4547
4548         lba = get_unaligned_be64(cmd + 2);
4549         alloc_len = get_unaligned_be32(cmd + 10);
4550
4551         if (alloc_len < 24)
4552                 return 0;
4553
4554         ret = check_device_access_params(scp, lba, 1, false);
4555         if (ret)
4556                 return ret;
4557
4558         if (scsi_debug_lbp()) {
4559                 struct sdeb_store_info *sip = devip2sip(devip, true);
4560
4561                 mapped = map_state(sip, lba, &num);
4562         } else {
4563                 mapped = 1;
4564                 /* following just in case virtual_gb changed */
4565                 sdebug_capacity = get_sdebug_capacity();
4566                 if (sdebug_capacity - lba <= 0xffffffff)
4567                         num = sdebug_capacity - lba;
4568                 else
4569                         num = 0xffffffff;
4570         }
4571
4572         memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
4573         put_unaligned_be32(20, arr);            /* Parameter Data Length */
4574         put_unaligned_be64(lba, arr + 8);       /* LBA */
4575         put_unaligned_be32(num, arr + 16);      /* Number of blocks */
4576         arr[20] = !mapped;              /* prov_stat=0: mapped; 1: dealloc */
4577
4578         return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
4579 }
4580
4581 static int resp_get_stream_status(struct scsi_cmnd *scp,
4582                                   struct sdebug_dev_info *devip)
4583 {
4584         u16 starting_stream_id, stream_id;
4585         const u8 *cmd = scp->cmnd;
4586         u32 alloc_len, offset;
4587         u8 arr[256] = {};
4588         struct scsi_stream_status_header *h = (void *)arr;
4589
4590         starting_stream_id = get_unaligned_be16(cmd + 4);
4591         alloc_len = get_unaligned_be32(cmd + 10);
4592
4593         if (alloc_len < 8) {
4594                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
4595                 return check_condition_result;
4596         }
4597
4598         if (starting_stream_id >= MAXIMUM_NUMBER_OF_STREAMS) {
4599                 mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
4600                 return check_condition_result;
4601         }
4602
4603         /*
4604          * The GET STREAM STATUS command only reports status information
4605          * about open streams. Treat the non-permanent stream as open.
4606          */
4607         put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS,
4608                            &h->number_of_open_streams);
4609
4610         for (offset = 8, stream_id = starting_stream_id;
4611              offset + 8 <= min_t(u32, alloc_len, sizeof(arr)) &&
4612                      stream_id < MAXIMUM_NUMBER_OF_STREAMS;
4613              offset += 8, stream_id++) {
4614                 struct scsi_stream_status *stream_status = (void *)arr + offset;
4615
4616                 stream_status->perm = stream_id < PERMANENT_STREAM_COUNT;
4617                 put_unaligned_be16(stream_id,
4618                                    &stream_status->stream_identifier);
4619                 stream_status->rel_lifetime = stream_id + 1;
4620         }
4621         put_unaligned_be32(offset - 8, &h->len); /* PARAMETER DATA LENGTH */
4622
4623         return fill_from_dev_buffer(scp, arr, min(offset, alloc_len));
4624 }
4625
4626 static int resp_sync_cache(struct scsi_cmnd *scp,
4627                            struct sdebug_dev_info *devip)
4628 {
4629         int res = 0;
4630         u64 lba;
4631         u32 num_blocks;
4632         u8 *cmd = scp->cmnd;
4633
4634         if (cmd[0] == SYNCHRONIZE_CACHE) {      /* 10 byte cdb */
4635                 lba = get_unaligned_be32(cmd + 2);
4636                 num_blocks = get_unaligned_be16(cmd + 7);
4637         } else {                                /* SYNCHRONIZE_CACHE(16) */
4638                 lba = get_unaligned_be64(cmd + 2);
4639                 num_blocks = get_unaligned_be32(cmd + 10);
4640         }
4641         if (lba + num_blocks > sdebug_capacity) {
4642                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4643                 return check_condition_result;
4644         }
4645         if (!write_since_sync || (cmd[1] & 0x2))
4646                 res = SDEG_RES_IMMED_MASK;
4647         else            /* delay if write_since_sync and IMMED clear */
4648                 write_since_sync = false;
4649         return res;
4650 }
4651
4652 /*
4653  * Assuming the LBA+num_blocks is not out-of-range, this function will return
4654  * CONDITION MET if the specified blocks will/have fitted in the cache, and
4655  * a GOOD status otherwise. Model a disk with a big cache and yield
4656  * CONDITION MET. Actually tries to bring range in main memory into the
4657  * cache associated with the CPU(s).
4658  */
static int resp_pre_fetch(struct scsi_cmnd *scp,
			  struct sdebug_dev_info *devip)
{
	int res = 0;
	u64 lba;
	u64 block, rest = 0;
	u32 nblks;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip, true);
	u8 *fsp = sip->storep;

	if (cmd[0] == PRE_FETCH) {	/* 10 byte cdb */
		lba = get_unaligned_be32(cmd + 2);
		nblks = get_unaligned_be16(cmd + 7);
	} else {			/* PRE-FETCH(16) */
		lba = get_unaligned_be64(cmd + 2);
		nblks = get_unaligned_be32(cmd + 10);
	}
	if (lba + nblks > sdebug_capacity) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
		return check_condition_result;
	}
	if (!fsp)
		goto fini;	/* no backing store: nothing to prefetch */
	/* PRE-FETCH spec says nothing about LBP or PI so skip them */
	/* do_div() truncates lba to the store offset, returns remainder */
	block = do_div(lba, sdebug_store_sectors);
	if (block + nblks > sdebug_store_sectors)
		rest = block + nblks - sdebug_store_sectors;	/* wrap-around */

	/* Try to bring the PRE-FETCH range into CPU's cache */
	sdeb_read_lock(sip);
	prefetch_range(fsp + (sdebug_sector_size * block),
		       (nblks - rest) * sdebug_sector_size);
	if (rest)
		prefetch_range(fsp, rest * sdebug_sector_size);
	sdeb_read_unlock(sip);
fini:
	if (cmd[1] & 0x2)	/* IMMED bit set in the CDB */
		res = SDEG_RES_IMMED_MASK;
	/* see comment above: always complete with CONDITION MET */
	return res | condition_met_result;
}
4700
4701 #define RL_BUCKET_ELEMS 8
4702
4703 /* Even though each pseudo target has a REPORT LUNS "well known logical unit"
4704  * (W-LUN), the normal Linux scanning logic does not associate it with a
4705  * device (e.g. /dev/sg7). The following magic will make that association:
4706  *   "cd /sys/class/scsi_host/host<n> ; echo '- - 49409' > scan"
4707  * where <n> is a host number. If there are multiple targets in a host then
4708  * the above will associate a W-LUN to each target. To only get a W-LUN
4709  * for target 2, then use "echo '- 2 49409' > scan" .
4710  */
/*
 * Respond to REPORT LUNS. The LUN list is built in fixed-size buckets of
 * RL_BUCKET_ELEMS 8-byte entries so an arbitrarily large LUN count never
 * needs a large on-stack buffer; each full bucket is copied into the
 * data-in buffer at offset off_rsp via p_fill_from_dev_buffer().
 */
static int resp_report_luns(struct scsi_cmnd *scp,
			    struct sdebug_dev_info *devip)
{
	unsigned char *cmd = scp->cmnd;
	unsigned int alloc_len;
	unsigned char select_report;
	u64 lun;
	struct scsi_lun *lun_p;
	u8 arr[RL_BUCKET_ELEMS * sizeof(struct scsi_lun)];
	unsigned int lun_cnt;	/* normal LUN count (max: 256) */
	unsigned int wlun_cnt;	/* report luns W-LUN count */
	unsigned int tlun_cnt;	/* total LUN count */
	unsigned int rlen;	/* response length (in bytes) */
	int k, j, n, res;
	unsigned int off_rsp = 0;
	const int sz_lun = sizeof(struct scsi_lun);

	clear_luns_changed_on_target(devip);

	select_report = cmd[2];
	alloc_len = get_unaligned_be32(cmd + 6);

	if (alloc_len < 4) {
		pr_err("alloc len too small %d\n", alloc_len);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
		return check_condition_result;
	}

	switch (select_report) {
	case 0:		/* all LUNs apart from W-LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 0;
		break;
	case 1:		/* only W-LUNs */
		lun_cnt = 0;
		wlun_cnt = 1;
		break;
	case 2:		/* all LUNs */
		lun_cnt = sdebug_max_luns;
		wlun_cnt = 1;
		break;
	case 0x10:	/* only administrative LUs */
	case 0x11:	/* see SPC-5 */
	case 0x12:	/* only subsiduary LUs owned by referenced LU */
	default:
		pr_debug("select report invalid %d\n", select_report);
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
		return check_condition_result;
	}

	if (sdebug_no_lun_0 && (lun_cnt > 0))
		--lun_cnt;

	tlun_cnt = lun_cnt + wlun_cnt;
	rlen = tlun_cnt * sz_lun;	/* excluding 8 byte header */
	scsi_set_resid(scp, scsi_bufflen(scp));
	pr_debug("select_report %d luns = %d wluns = %d no_lun0 %d\n",
		 select_report, lun_cnt, wlun_cnt, sdebug_no_lun_0);

	/* loops rely on sizeof response header same as sizeof lun (both 8) */
	lun = sdebug_no_lun_0 ? 1 : 0;
	for (k = 0, j = 0, res = 0; true; ++k, j = 0) {
		memset(arr, 0, sizeof(arr));
		lun_p = (struct scsi_lun *)&arr[0];
		if (k == 0) {
			/* first bucket carries the 8-byte response header */
			put_unaligned_be32(rlen, &arr[0]);
			++lun_p;
			j = 1;
		}
		for ( ; j < RL_BUCKET_ELEMS; ++j, ++lun_p) {
			if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
				break;
			int_to_scsilun(lun++, lun_p);
			if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
				lun_p->scsi_lun[0] |= 0x40;
		}
		if (j < RL_BUCKET_ELEMS)
			break;	/* partial bucket: flushed after the loop */
		n = j * sz_lun;
		res = p_fill_from_dev_buffer(scp, arr, n, off_rsp);
		if (res)
			return res;
		off_rsp += n;
	}
	if (wlun_cnt) {
		/* append the REPORT LUNS well-known LUN at the tail */
		int_to_scsilun(SCSI_W_LUN_REPORT_LUNS, lun_p);
		++j;
	}
	if (j > 0)
		res = p_fill_from_dev_buffer(scp, arr, j * sz_lun, off_rsp);
	return res;
}
4803
/*
 * Respond to VERIFY(10)/VERIFY(16). With BYTCHK=1 the initiator's
 * data-out buffer is byte-compared against the store; with BYTCHK=3 a
 * single block is sent and compared against every block in the range.
 * BYTCHK=0 is reported as success without touching the store.
 */
static int resp_verify(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	bool is_bytchk3 = false;
	u8 bytchk;
	int ret, j;
	u32 vnum, a_num, off;
	const u32 lb_size = sdebug_sector_size;
	u64 lba;
	u8 *arr;
	u8 *cmd = scp->cmnd;
	struct sdeb_store_info *sip = devip2sip(devip, true);

	bytchk = (cmd[1] >> 1) & 0x3;
	if (bytchk == 0) {
		return 0;	/* always claim internal verify okay */
	} else if (bytchk == 2) {
		mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 2);
		return check_condition_result;
	} else if (bytchk == 3) {
		is_bytchk3 = true;	/* 1 block sent, compared repeatedly */
	}
	switch (cmd[0]) {
	case VERIFY_16:
		lba = get_unaligned_be64(cmd + 2);
		vnum = get_unaligned_be32(cmd + 10);
		break;
	case VERIFY:		/* is VERIFY(10) */
		lba = get_unaligned_be32(cmd + 2);
		vnum = get_unaligned_be16(cmd + 7);
		break;
	default:
		mk_sense_invalid_opcode(scp);
		return check_condition_result;
	}
	if (vnum == 0)
		return 0;	/* not an error */
	a_num = is_bytchk3 ? 1 : vnum;
	/* Treat following check like one for read (i.e. no write) access */
	ret = check_device_access_params(scp, lba, a_num, false);
	if (ret)
		return ret;

	/* sized for the whole range even when only one block is fetched */
	arr = kcalloc(lb_size, vnum, GFP_ATOMIC | __GFP_NOWARN);
	if (!arr) {
		mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
				INSUFF_RES_ASCQ);
		return check_condition_result;
	}
	/* Not changing store, so only need read access */
	sdeb_read_lock(sip);

	ret = do_dout_fetch(scp, a_num, arr);
	if (ret == -1) {
		ret = DID_ERROR << 16;
		goto cleanup;
	} else if (sdebug_verbose && (ret < (a_num * lb_size))) {
		sdev_printk(KERN_INFO, scp->device,
			    "%s: %s: cdb indicated=%u, IO sent=%d bytes\n",
			    my_name, __func__, a_num * lb_size, ret);
	}
	if (is_bytchk3) {
		/* replicate the single received block across the range */
		for (j = 1, off = lb_size; j < vnum; ++j, off += lb_size)
			memcpy(arr + off, arr, lb_size);
	}
	ret = 0;
	/* compare only: the store is not modified (read lock suffices) */
	if (!comp_write_worker(sip, lba, vnum, arr, true)) {
		mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
		ret = check_condition_result;
		goto cleanup;
	}
cleanup:
	sdeb_read_unlock(sip);
	kfree(arr);
	return ret;
}
4879
4880 #define RZONES_DESC_HD 64
4881
4882 /* Report zones depending on start LBA and reporting options */
4883 static int resp_report_zones(struct scsi_cmnd *scp,
4884                              struct sdebug_dev_info *devip)
4885 {
4886         unsigned int rep_max_zones, nrz = 0;
4887         int ret = 0;
4888         u32 alloc_len, rep_opts, rep_len;
4889         bool partial;
4890         u64 lba, zs_lba;
4891         u8 *arr = NULL, *desc;
4892         u8 *cmd = scp->cmnd;
4893         struct sdeb_zone_state *zsp = NULL;
4894         struct sdeb_store_info *sip = devip2sip(devip, false);
4895
4896         if (!sdebug_dev_is_zoned(devip)) {
4897                 mk_sense_invalid_opcode(scp);
4898                 return check_condition_result;
4899         }
4900         zs_lba = get_unaligned_be64(cmd + 2);
4901         alloc_len = get_unaligned_be32(cmd + 10);
4902         if (alloc_len == 0)
4903                 return 0;       /* not an error */
4904         rep_opts = cmd[14] & 0x3f;
4905         partial = cmd[14] & 0x80;
4906
4907         if (zs_lba >= sdebug_capacity) {
4908                 mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
4909                 return check_condition_result;
4910         }
4911
4912         rep_max_zones = (alloc_len - 64) >> ilog2(RZONES_DESC_HD);
4913
4914         arr = kzalloc(alloc_len, GFP_ATOMIC | __GFP_NOWARN);
4915         if (!arr) {
4916                 mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
4917                                 INSUFF_RES_ASCQ);
4918                 return check_condition_result;
4919         }
4920
4921         sdeb_read_lock(sip);
4922
4923         desc = arr + 64;
4924         for (lba = zs_lba; lba < sdebug_capacity;
4925              lba = zsp->z_start + zsp->z_size) {
4926                 if (WARN_ONCE(zbc_zone(devip, lba) == zsp, "lba = %llu\n", lba))
4927                         break;
4928                 zsp = zbc_zone(devip, lba);
4929                 switch (rep_opts) {
4930                 case 0x00:
4931                         /* All zones */
4932                         break;
4933                 case 0x01:
4934                         /* Empty zones */
4935                         if (zsp->z_cond != ZC1_EMPTY)
4936                                 continue;
4937                         break;
4938                 case 0x02:
4939                         /* Implicit open zones */
4940                         if (zsp->z_cond != ZC2_IMPLICIT_OPEN)
4941                                 continue;
4942                         break;
4943                 case 0x03:
4944                         /* Explicit open zones */
4945                         if (zsp->z_cond != ZC3_EXPLICIT_OPEN)
4946                                 continue;
4947                         break;
4948                 case 0x04:
4949                         /* Closed zones */
4950                         if (zsp->z_cond != ZC4_CLOSED)
4951                                 continue;
4952                         break;
4953                 case 0x05:
4954                         /* Full zones */
4955                         if (zsp->z_cond != ZC5_FULL)
4956                                 continue;
4957                         break;
4958                 case 0x06:
4959                 case 0x07:
4960                 case 0x10:
4961                         /*
4962                          * Read-only, offline, reset WP recommended are
4963                          * not emulated: no zones to report;
4964                          */
4965                         continue;
4966                 case 0x11:
4967                         /* non-seq-resource set */
4968                         if (!zsp->z_non_seq_resource)
4969                                 continue;
4970                         break;
4971                 case 0x3e:
4972                         /* All zones except gap zones. */
4973                         if (zbc_zone_is_gap(zsp))
4974                                 continue;
4975                         break;
4976                 case 0x3f:
4977                         /* Not write pointer (conventional) zones */
4978                         if (zbc_zone_is_seq(zsp))
4979                                 continue;
4980                         break;
4981                 default:
4982                         mk_sense_buffer(scp, ILLEGAL_REQUEST,
4983                                         INVALID_FIELD_IN_CDB, 0);
4984                         ret = check_condition_result;
4985                         goto fini;
4986                 }
4987
4988                 if (nrz < rep_max_zones) {
4989                         /* Fill zone descriptor */
4990                         desc[0] = zsp->z_type;
4991                         desc[1] = zsp->z_cond << 4;
4992                         if (zsp->z_non_seq_resource)
4993                                 desc[1] |= 1 << 1;
4994                         put_unaligned_be64((u64)zsp->z_size, desc + 8);
4995                         put_unaligned_be64((u64)zsp->z_start, desc + 16);
4996                         put_unaligned_be64((u64)zsp->z_wp, desc + 24);
4997                         desc += 64;
4998                 }
4999
5000                 if (partial && nrz >= rep_max_zones)
5001                         break;
5002
5003                 nrz++;
5004         }
5005
5006         /* Report header */
5007         /* Zone list length. */
5008         put_unaligned_be32(nrz * RZONES_DESC_HD, arr + 0);
5009         /* Maximum LBA */
5010         put_unaligned_be64(sdebug_capacity - 1, arr + 8);
5011         /* Zone starting LBA granularity. */
5012         if (devip->zcap < devip->zsize)
5013                 put_unaligned_be64(devip->zsize, arr + 16);
5014
5015         rep_len = (unsigned long)desc - (unsigned long)arr;
5016         ret = fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, rep_len));
5017
5018 fini:
5019         sdeb_read_unlock(sip);
5020         kfree(arr);
5021         return ret;
5022 }
5023
5024 /* Logic transplanted from tcmu-runner, file_zbc.c */
5025 static void zbc_open_all(struct sdebug_dev_info *devip)
5026 {
5027         struct sdeb_zone_state *zsp = &devip->zstate[0];
5028         unsigned int i;
5029
5030         for (i = 0; i < devip->nr_zones; i++, zsp++) {
5031                 if (zsp->z_cond == ZC4_CLOSED)
5032                         zbc_open_zone(devip, &devip->zstate[i], true);
5033         }
5034 }
5035
/*
 * Implements the ZBC OPEN ZONE command. With the ALL bit set, explicitly
 * opens every closed zone; otherwise opens the single zone whose start LBA
 * is given in the CDB. Returns 0 or check_condition_result with sense set.
 */
static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
        int res = 0;
        u64 z_id;
        enum sdebug_z_cond zc;
        u8 *cmd = scp->cmnd;
        struct sdeb_zone_state *zsp;
        bool all = cmd[14] & 0x01;      /* ALL bit of the CDB */
        struct sdeb_store_info *sip = devip2sip(devip, false);

        /* OPEN ZONE is only valid on a zoned device */
        if (!sdebug_dev_is_zoned(devip)) {
                mk_sense_invalid_opcode(scp);
                return check_condition_result;
        }

        sdeb_write_lock(sip);

        if (all) {
                /* Check if all closed zones can be open */
                if (devip->max_open &&
                    devip->nr_exp_open + devip->nr_closed > devip->max_open) {
                        mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
                                        INSUFF_ZONE_ASCQ);
                        res = check_condition_result;
                        goto fini;
                }
                /* Open all closed zones */
                zbc_open_all(devip);
                goto fini;
        }

        /* Open the specified zone */
        z_id = get_unaligned_be64(cmd + 2);     /* zone start LBA */
        if (z_id >= sdebug_capacity) {
                mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
                res = check_condition_result;
                goto fini;
        }

        zsp = zbc_zone(devip, z_id);
        /* The given LBA must be the first LBA of a zone */
        if (z_id != zsp->z_start) {
                mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
                res = check_condition_result;
                goto fini;
        }
        /* Conventional zones have no open/closed state */
        if (zbc_zone_is_conv(zsp)) {
                mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
                res = check_condition_result;
                goto fini;
        }

        zc = zsp->z_cond;
        /* Already explicitly open or full: nothing to do */
        if (zc == ZC3_EXPLICIT_OPEN || zc == ZC5_FULL)
                goto fini;

        /* Enforce the limit on explicitly open zones */
        if (devip->max_open && devip->nr_exp_open >= devip->max_open) {
                mk_sense_buffer(scp, DATA_PROTECT, INSUFF_RES_ASC,
                                INSUFF_ZONE_ASCQ);
                res = check_condition_result;
                goto fini;
        }

        zbc_open_zone(devip, zsp, true);
fini:
        sdeb_write_unlock(sip);
        return res;
}
5103
5104 static void zbc_close_all(struct sdebug_dev_info *devip)
5105 {
5106         unsigned int i;
5107
5108         for (i = 0; i < devip->nr_zones; i++)
5109                 zbc_close_zone(devip, &devip->zstate[i]);
5110 }
5111
/*
 * Implements the ZBC CLOSE ZONE command. With the ALL bit set, closes all
 * zones; otherwise closes the single zone whose start LBA is in the CDB.
 * Returns 0 or check_condition_result with sense data set.
 */
static int resp_close_zone(struct scsi_cmnd *scp,
                           struct sdebug_dev_info *devip)
{
        int res = 0;
        u64 z_id;
        u8 *cmd = scp->cmnd;
        struct sdeb_zone_state *zsp;
        bool all = cmd[14] & 0x01;      /* ALL bit of the CDB */
        struct sdeb_store_info *sip = devip2sip(devip, false);

        /* Only valid on a zoned device */
        if (!sdebug_dev_is_zoned(devip)) {
                mk_sense_invalid_opcode(scp);
                return check_condition_result;
        }

        sdeb_write_lock(sip);

        if (all) {
                zbc_close_all(devip);
                goto fini;
        }

        /* Close specified zone */
        z_id = get_unaligned_be64(cmd + 2);     /* zone start LBA */
        if (z_id >= sdebug_capacity) {
                mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
                res = check_condition_result;
                goto fini;
        }

        zsp = zbc_zone(devip, z_id);
        /* The given LBA must be the first LBA of a zone */
        if (z_id != zsp->z_start) {
                mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
                res = check_condition_result;
                goto fini;
        }
        /* Conventional zones cannot be closed */
        if (zbc_zone_is_conv(zsp)) {
                mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
                res = check_condition_result;
                goto fini;
        }

        zbc_close_zone(devip, zsp);
fini:
        sdeb_write_unlock(sip);
        return res;
}
5159
/*
 * Transition a zone to the FULL condition. Closed, implicitly open,
 * explicitly open and (when @empty is true) empty zones are finished;
 * zones in any other condition are left untouched.
 */
static void zbc_finish_zone(struct sdebug_dev_info *devip,
                            struct sdeb_zone_state *zsp, bool empty)
{
        enum sdebug_z_cond zc = zsp->z_cond;

        if (zc == ZC4_CLOSED || zc == ZC2_IMPLICIT_OPEN ||
            zc == ZC3_EXPLICIT_OPEN || (empty && zc == ZC1_EMPTY)) {
                /* Close open zones first so the open-zone counts are updated */
                if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
                        zbc_close_zone(devip, zsp);
                if (zsp->z_cond == ZC4_CLOSED)
                        devip->nr_closed--;
                /* Advance the write pointer to the end of the zone */
                zsp->z_wp = zsp->z_start + zsp->z_size;
                zsp->z_cond = ZC5_FULL;
        }
}
5175
5176 static void zbc_finish_all(struct sdebug_dev_info *devip)
5177 {
5178         unsigned int i;
5179
5180         for (i = 0; i < devip->nr_zones; i++)
5181                 zbc_finish_zone(devip, &devip->zstate[i], false);
5182 }
5183
/*
 * Implements the ZBC FINISH ZONE command. With the ALL bit set, finishes
 * all zones; otherwise finishes the single zone whose start LBA is in the
 * CDB (an empty zone may be finished too in the single-zone case).
 * Returns 0 or check_condition_result with sense data set.
 */
static int resp_finish_zone(struct scsi_cmnd *scp,
                            struct sdebug_dev_info *devip)
{
        struct sdeb_zone_state *zsp;
        int res = 0;
        u64 z_id;
        u8 *cmd = scp->cmnd;
        bool all = cmd[14] & 0x01;      /* ALL bit of the CDB */
        struct sdeb_store_info *sip = devip2sip(devip, false);

        /* Only valid on a zoned device */
        if (!sdebug_dev_is_zoned(devip)) {
                mk_sense_invalid_opcode(scp);
                return check_condition_result;
        }

        sdeb_write_lock(sip);

        if (all) {
                zbc_finish_all(devip);
                goto fini;
        }

        /* Finish the specified zone */
        z_id = get_unaligned_be64(cmd + 2);     /* zone start LBA */
        if (z_id >= sdebug_capacity) {
                mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
                res = check_condition_result;
                goto fini;
        }

        zsp = zbc_zone(devip, z_id);
        /* The given LBA must be the first LBA of a zone */
        if (z_id != zsp->z_start) {
                mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
                res = check_condition_result;
                goto fini;
        }
        /* Conventional zones cannot be finished */
        if (zbc_zone_is_conv(zsp)) {
                mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
                res = check_condition_result;
                goto fini;
        }

        zbc_finish_zone(devip, zsp, true);
fini:
        sdeb_write_unlock(sip);
        return res;
}
5231
/*
 * Reset the write pointer of a single sequential zone back to its start,
 * zeroing the written part of the backing store and marking the zone empty.
 * Conventional/gap zones (no write pointer) are left untouched.
 */
static void zbc_rwp_zone(struct sdebug_dev_info *devip,
                         struct sdeb_zone_state *zsp)
{
        enum sdebug_z_cond zc;
        struct sdeb_store_info *sip = devip2sip(devip, false);

        /* Only sequential write zones have a write pointer */
        if (!zbc_zone_is_seq(zsp))
                return;

        zc = zsp->z_cond;
        /* Close an open zone first so the open-zone accounting stays right */
        if (zc == ZC2_IMPLICIT_OPEN || zc == ZC3_EXPLICIT_OPEN)
                zbc_close_zone(devip, zsp);

        if (zsp->z_cond == ZC4_CLOSED)
                devip->nr_closed--;

        /* Zero the backing store from the zone start up to the write pointer */
        if (zsp->z_wp > zsp->z_start)
                memset(sip->storep + zsp->z_start * sdebug_sector_size, 0,
                       (zsp->z_wp - zsp->z_start) * sdebug_sector_size);

        zsp->z_non_seq_resource = false;
        zsp->z_wp = zsp->z_start;
        zsp->z_cond = ZC1_EMPTY;
}
5256
5257 static void zbc_rwp_all(struct sdebug_dev_info *devip)
5258 {
5259         unsigned int i;
5260
5261         for (i = 0; i < devip->nr_zones; i++)
5262                 zbc_rwp_zone(devip, &devip->zstate[i]);
5263 }
5264
/*
 * Implements the ZBC RESET WRITE POINTER command. With the ALL bit set,
 * resets every zone; otherwise resets the single zone whose start LBA is
 * given in the CDB. Returns 0 or check_condition_result with sense set.
 */
static int resp_rwp_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
        struct sdeb_zone_state *zsp;
        int res = 0;
        u64 z_id;
        u8 *cmd = scp->cmnd;
        bool all = cmd[14] & 0x01;      /* ALL bit of the CDB */
        struct sdeb_store_info *sip = devip2sip(devip, false);

        /* Only valid on a zoned device */
        if (!sdebug_dev_is_zoned(devip)) {
                mk_sense_invalid_opcode(scp);
                return check_condition_result;
        }

        sdeb_write_lock(sip);

        if (all) {
                zbc_rwp_all(devip);
                goto fini;
        }

        z_id = get_unaligned_be64(cmd + 2);     /* zone start LBA */
        if (z_id >= sdebug_capacity) {
                mk_sense_buffer(scp, ILLEGAL_REQUEST, LBA_OUT_OF_RANGE, 0);
                res = check_condition_result;
                goto fini;
        }

        zsp = zbc_zone(devip, z_id);
        /* The given LBA must be the first LBA of a zone */
        if (z_id != zsp->z_start) {
                mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
                res = check_condition_result;
                goto fini;
        }
        /* Conventional zones have no write pointer to reset */
        if (zbc_zone_is_conv(zsp)) {
                mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
                res = check_condition_result;
                goto fini;
        }

        zbc_rwp_zone(devip, zsp);
fini:
        sdeb_write_unlock(sip);
        return res;
}
5310
5311 static u32 get_tag(struct scsi_cmnd *cmnd)
5312 {
5313         return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
5314 }
5315
/* Queued (deferred) command completions converge here. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
        struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
        unsigned long flags;
        struct scsi_cmnd *scp = sqcp->scmd;
        struct sdebug_scsi_cmd *sdsc;
        bool aborted;

        if (sdebug_statistics) {
                atomic_inc(&sdebug_completions);
                /* Completion on a CPU other than the issuing one is a "miss" */
                if (raw_smp_processor_id() != sd_dp->issuing_cpu)
                        atomic_inc(&sdebug_miss_cpus);
        }

        if (!scp) {
                pr_err("scmd=NULL\n");
                goto out;
        }

        /* Detach the queued command from scp under the per-command lock */
        sdsc = scsi_cmd_priv(scp);
        spin_lock_irqsave(&sdsc->lock, flags);
        aborted = sd_dp->aborted;
        /* Consume the aborted flag so it is only acted upon once */
        if (unlikely(aborted))
                sd_dp->aborted = false;
        ASSIGN_QUEUED_CMD(scp, NULL);

        spin_unlock_irqrestore(&sdsc->lock, flags);

        if (aborted) {
                /* Let the block layer's error handling take over instead */
                pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
                blk_abort_request(scsi_cmd_to_rq(scp));
                goto out;
        }

        scsi_done(scp); /* callback to mid level */
out:
        sdebug_free_queued_cmd(sqcp);
}
5355
5356 /* When high resolution timer goes off this function is called. */
5357 static enum hrtimer_restart sdebug_q_cmd_hrt_complete(struct hrtimer *timer)
5358 {
5359         struct sdebug_defer *sd_dp = container_of(timer, struct sdebug_defer,
5360                                                   hrt);
5361         sdebug_q_cmd_complete(sd_dp);
5362         return HRTIMER_NORESTART;
5363 }
5364
5365 /* When work queue schedules work, it calls this function. */
5366 static void sdebug_q_cmd_wq_complete(struct work_struct *work)
5367 {
5368         struct sdebug_defer *sd_dp = container_of(work, struct sdebug_defer,
5369                                                   ew.work);
5370         sdebug_q_cmd_complete(sd_dp);
5371 }
5372
/*
 * When sdebug_uuid_ctl == 2 every logical unit shares one LU name UUID;
 * it is generated once in sdebug_device_create() and cached here.
 */
static bool got_shared_uuid;
static uuid_t shared_uuid;
5375
5376 static int sdebug_device_create_zones(struct sdebug_dev_info *devip)
5377 {
5378         struct sdeb_zone_state *zsp;
5379         sector_t capacity = get_sdebug_capacity();
5380         sector_t conv_capacity;
5381         sector_t zstart = 0;
5382         unsigned int i;
5383
5384         /*
5385          * Set the zone size: if sdeb_zbc_zone_size_mb is not set, figure out
5386          * a zone size allowing for at least 4 zones on the device. Otherwise,
5387          * use the specified zone size checking that at least 2 zones can be
5388          * created for the device.
5389          */
5390         if (!sdeb_zbc_zone_size_mb) {
5391                 devip->zsize = (DEF_ZBC_ZONE_SIZE_MB * SZ_1M)
5392                         >> ilog2(sdebug_sector_size);
5393                 while (capacity < devip->zsize << 2 && devip->zsize >= 2)
5394                         devip->zsize >>= 1;
5395                 if (devip->zsize < 2) {
5396                         pr_err("Device capacity too small\n");
5397                         return -EINVAL;
5398                 }
5399         } else {
5400                 if (!is_power_of_2(sdeb_zbc_zone_size_mb)) {
5401                         pr_err("Zone size is not a power of 2\n");
5402                         return -EINVAL;
5403                 }
5404                 devip->zsize = (sdeb_zbc_zone_size_mb * SZ_1M)
5405                         >> ilog2(sdebug_sector_size);
5406                 if (devip->zsize >= capacity) {
5407                         pr_err("Zone size too large for device capacity\n");
5408                         return -EINVAL;
5409                 }
5410         }
5411
5412         devip->zsize_shift = ilog2(devip->zsize);
5413         devip->nr_zones = (capacity + devip->zsize - 1) >> devip->zsize_shift;
5414
5415         if (sdeb_zbc_zone_cap_mb == 0) {
5416                 devip->zcap = devip->zsize;
5417         } else {
5418                 devip->zcap = (sdeb_zbc_zone_cap_mb * SZ_1M) >>
5419                               ilog2(sdebug_sector_size);
5420                 if (devip->zcap > devip->zsize) {
5421                         pr_err("Zone capacity too large\n");
5422                         return -EINVAL;
5423                 }
5424         }
5425
5426         conv_capacity = (sector_t)sdeb_zbc_nr_conv << devip->zsize_shift;
5427         if (conv_capacity >= capacity) {
5428                 pr_err("Number of conventional zones too large\n");
5429                 return -EINVAL;
5430         }
5431         devip->nr_conv_zones = sdeb_zbc_nr_conv;
5432         devip->nr_seq_zones = ALIGN(capacity - conv_capacity, devip->zsize) >>
5433                               devip->zsize_shift;
5434         devip->nr_zones = devip->nr_conv_zones + devip->nr_seq_zones;
5435
5436         /* Add gap zones if zone capacity is smaller than the zone size */
5437         if (devip->zcap < devip->zsize)
5438                 devip->nr_zones += devip->nr_seq_zones;
5439
5440         if (devip->zoned) {
5441                 /* zbc_max_open_zones can be 0, meaning "not reported" */
5442                 if (sdeb_zbc_max_open >= devip->nr_zones - 1)
5443                         devip->max_open = (devip->nr_zones - 1) / 2;
5444                 else
5445                         devip->max_open = sdeb_zbc_max_open;
5446         }
5447
5448         devip->zstate = kcalloc(devip->nr_zones,
5449                                 sizeof(struct sdeb_zone_state), GFP_KERNEL);
5450         if (!devip->zstate)
5451                 return -ENOMEM;
5452
5453         for (i = 0; i < devip->nr_zones; i++) {
5454                 zsp = &devip->zstate[i];
5455
5456                 zsp->z_start = zstart;
5457
5458                 if (i < devip->nr_conv_zones) {
5459                         zsp->z_type = ZBC_ZTYPE_CNV;
5460                         zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5461                         zsp->z_wp = (sector_t)-1;
5462                         zsp->z_size =
5463                                 min_t(u64, devip->zsize, capacity - zstart);
5464                 } else if ((zstart & (devip->zsize - 1)) == 0) {
5465                         if (devip->zoned)
5466                                 zsp->z_type = ZBC_ZTYPE_SWR;
5467                         else
5468                                 zsp->z_type = ZBC_ZTYPE_SWP;
5469                         zsp->z_cond = ZC1_EMPTY;
5470                         zsp->z_wp = zsp->z_start;
5471                         zsp->z_size =
5472                                 min_t(u64, devip->zcap, capacity - zstart);
5473                 } else {
5474                         zsp->z_type = ZBC_ZTYPE_GAP;
5475                         zsp->z_cond = ZBC_NOT_WRITE_POINTER;
5476                         zsp->z_wp = (sector_t)-1;
5477                         zsp->z_size = min_t(u64, devip->zsize - devip->zcap,
5478                                             capacity - zstart);
5479                 }
5480
5481                 WARN_ON_ONCE((int)zsp->z_size <= 0);
5482                 zstart += zsp->z_size;
5483         }
5484
5485         return 0;
5486 }
5487
/*
 * Allocate and initialize a per-LU sdebug_dev_info, link it onto the
 * host's dev_info_list and return it, or NULL on failure.
 */
static struct sdebug_dev_info *sdebug_device_create(
                        struct sdebug_host_info *sdbg_host, gfp_t flags)
{
        struct sdebug_dev_info *devip;

        devip = kzalloc(sizeof(*devip), flags);
        if (devip) {
                /* uuid_ctl == 1: unique LU name; == 2: one name shared by all LUs */
                if (sdebug_uuid_ctl == 1)
                        uuid_gen(&devip->lu_name);
                else if (sdebug_uuid_ctl == 2) {
                        if (got_shared_uuid)
                                devip->lu_name = shared_uuid;
                        else {
                                /* First LU generates the UUID shared by the rest */
                                uuid_gen(&shared_uuid);
                                got_shared_uuid = true;
                                devip->lu_name = shared_uuid;
                        }
                }
                devip->sdbg_host = sdbg_host;
                if (sdeb_zbc_in_use) {
                        /* zoned is true only for the host-managed model */
                        devip->zoned = sdeb_zbc_model == BLK_ZONED_HM;
                        if (sdebug_device_create_zones(devip)) {
                                kfree(devip);
                                return NULL;
                        }
                } else {
                        devip->zoned = false;
                }
                devip->create_ts = ktime_get_boottime();
                /* stopped == 2: device not ready until tur_ms_to_ready elapses */
                atomic_set(&devip->stopped, (sdeb_tur_ms_to_ready > 0 ? 2 : 0));
                spin_lock_init(&devip->list_lock);
                INIT_LIST_HEAD(&devip->inject_err_list);
                list_add_tail(&devip->dev_list, &sdbg_host->dev_info_list);
        }
        return devip;
}
5524
5525 static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
5526 {
5527         struct sdebug_host_info *sdbg_host;
5528         struct sdebug_dev_info *open_devip = NULL;
5529         struct sdebug_dev_info *devip;
5530
5531         sdbg_host = shost_to_sdebug_host(sdev->host);
5532
5533         list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
5534                 if ((devip->used) && (devip->channel == sdev->channel) &&
5535                     (devip->target == sdev->id) &&
5536                     (devip->lun == sdev->lun))
5537                         return devip;
5538                 else {
5539                         if ((!devip->used) && (!open_devip))
5540                                 open_devip = devip;
5541                 }
5542         }
5543         if (!open_devip) { /* try and make a new one */
5544                 open_devip = sdebug_device_create(sdbg_host, GFP_ATOMIC);
5545                 if (!open_devip) {
5546                         pr_err("out of memory at line %d\n", __LINE__);
5547                         return NULL;
5548                 }
5549         }
5550
5551         open_devip->channel = sdev->channel;
5552         open_devip->target = sdev->id;
5553         open_devip->lun = sdev->lun;
5554         open_devip->sdbg_host = sdbg_host;
5555         set_bit(SDEBUG_UA_POOCCUR, open_devip->uas_bm);
5556         open_devip->used = true;
5557         return open_devip;
5558 }
5559
5560 static int scsi_debug_slave_alloc(struct scsi_device *sdp)
5561 {
5562         if (sdebug_verbose)
5563                 pr_info("slave_alloc <%u %u %u %llu>\n",
5564                        sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
5565
5566         return 0;
5567 }
5568
/*
 * Configure an sdev: attach (or create) its sdebug_dev_info, apply the
 * CDB length and restart settings, and create the per-device debugfs
 * directory with its "error" injection file. Returns 0 on success, 1 when
 * no per-device state could be obtained (device will be marked offline).
 */
static int scsi_debug_slave_configure(struct scsi_device *sdp)
{
        struct sdebug_dev_info *devip =
                        (struct sdebug_dev_info *)sdp->hostdata;
        struct dentry *dentry;

        if (sdebug_verbose)
                pr_info("slave_configure <%u %u %u %llu>\n",
                       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
        if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
                sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
        /* Attach the per-device state, building it on first configure */
        if (devip == NULL) {
                devip = find_build_dev_info(sdp);
                if (devip == NULL)
                        return 1;  /* no resources, will be marked offline */
        }
        sdp->hostdata = devip;
        if (sdebug_no_uld)
                sdp->no_uld_attach = 1;
        config_cdb_len(sdp);

        if (sdebug_allow_restart)
                sdp->allow_restart = 1;

        /*
         * NOTE(review): the directory is named after sdev_dev while the
         * failure message prints sdev_gendev - confirm which is intended.
         */
        devip->debugfs_entry = debugfs_create_dir(dev_name(&sdp->sdev_dev),
                                sdebug_debugfs_root);
        if (IS_ERR_OR_NULL(devip->debugfs_entry))
                pr_info("%s: failed to create debugfs directory for device %s\n",
                        __func__, dev_name(&sdp->sdev_gendev));

        /* Error-injection control file under the device's debugfs dir */
        dentry = debugfs_create_file("error", 0600, devip->debugfs_entry, sdp,
                                &sdebug_error_fops);
        if (IS_ERR_OR_NULL(dentry))
                pr_info("%s: failed to create error file for device %s\n",
                        __func__, dev_name(&sdp->sdev_gendev));

        return 0;
}
5607
/*
 * Tear down the per-device state when an sdev goes away: drain the
 * error-injection list, remove the debugfs directory, and release the
 * dev_info slot for re-use (the slot itself is not freed).
 */
static void scsi_debug_slave_destroy(struct scsi_device *sdp)
{
        struct sdebug_dev_info *devip =
                (struct sdebug_dev_info *)sdp->hostdata;
        struct sdebug_err_inject *err;

        if (sdebug_verbose)
                pr_info("slave_destroy <%u %u %u %llu>\n",
                       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);

        if (!devip)
                return;

        /* Unlink injected errors; entries are freed after an RCU grace period */
        spin_lock(&devip->list_lock);
        list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
                list_del_rcu(&err->list);
                call_rcu(&err->rcu, sdebug_err_free);
        }
        spin_unlock(&devip->list_lock);

        debugfs_remove(devip->debugfs_entry);

        /* make this slot available for re-use */
        devip->used = false;
        sdp->hostdata = NULL;
}
5634
5635 /* Returns true if we require the queued memory to be freed by the caller. */
5636 static bool stop_qc_helper(struct sdebug_defer *sd_dp,
5637                            enum sdeb_defer_type defer_t)
5638 {
5639         if (defer_t == SDEB_DEFER_HRT) {
5640                 int res = hrtimer_try_to_cancel(&sd_dp->hrt);
5641
5642                 switch (res) {
5643                 case 0: /* Not active, it must have already run */
5644                 case -1: /* -1 It's executing the CB */
5645                         return false;
5646                 case 1: /* Was active, we've now cancelled */
5647                 default:
5648                         return true;
5649                 }
5650         } else if (defer_t == SDEB_DEFER_WQ) {
5651                 /* Cancel if pending */
5652                 if (cancel_work_sync(&sd_dp->ew.work))
5653                         return true;
5654                 /* Was not pending, so it must have run */
5655                 return false;
5656         } else if (defer_t == SDEB_DEFER_POLL) {
5657                 return true;
5658         }
5659
5660         return false;
5661 }
5662
5663
/*
 * Detach and cancel a command's deferred completion. Caller must hold the
 * command's sdsc->lock. Returns true if a queued command was found.
 */
static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
{
        enum sdeb_defer_type l_defer_t;
        struct sdebug_defer *sd_dp;
        struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
        struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(cmnd);

        lockdep_assert_held(&sdsc->lock);

        /* Nothing queued: the command has already completed */
        if (!sqcp)
                return false;
        sd_dp = &sqcp->sd_dp;
        l_defer_t = READ_ONCE(sd_dp->defer_t);
        ASSIGN_QUEUED_CMD(cmnd, NULL);

        /* Free here only if the deferred work was actually cancelled */
        if (stop_qc_helper(sd_dp, l_defer_t))
                sdebug_free_queued_cmd(sqcp);

        return true;
}
5684
5685 /*
5686  * Called from scsi_debug_abort() only, which is for timed-out cmd.
5687  */
5688 static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
5689 {
5690         struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
5691         unsigned long flags;
5692         bool res;
5693
5694         spin_lock_irqsave(&sdsc->lock, flags);
5695         res = scsi_debug_stop_cmnd(cmnd);
5696         spin_unlock_irqrestore(&sdsc->lock, flags);
5697
5698         return res;
5699 }
5700
5701 /*
5702  * All we can do is set the cmnd as internally aborted and wait for it to
5703  * finish. We cannot call scsi_done() as normal completion path may do that.
5704  */
5705 static bool sdebug_stop_cmnd(struct request *rq, void *data)
5706 {
5707         scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
5708
5709         return true;
5710 }
5711
5712 /* Deletes (stops) timers or work queues of all queued commands */
5713 static void stop_all_queued(void)
5714 {
5715         struct sdebug_host_info *sdhp;
5716
5717         mutex_lock(&sdebug_host_list_mutex);
5718         list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
5719                 struct Scsi_Host *shost = sdhp->shost;
5720
5721                 blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
5722         }
5723         mutex_unlock(&sdebug_host_list_mutex);
5724 }
5725
5726 static int sdebug_fail_abort(struct scsi_cmnd *cmnd)
5727 {
5728         struct scsi_device *sdp = cmnd->device;
5729         struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
5730         struct sdebug_err_inject *err;
5731         unsigned char *cmd = cmnd->cmnd;
5732         int ret = 0;
5733
5734         if (devip == NULL)
5735                 return 0;
5736
5737         rcu_read_lock();
5738         list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5739                 if (err->type == ERR_ABORT_CMD_FAILED &&
5740                     (err->cmd == cmd[0] || err->cmd == 0xff)) {
5741                         ret = !!err->cnt;
5742                         if (err->cnt < 0)
5743                                 err->cnt++;
5744
5745                         rcu_read_unlock();
5746                         return ret;
5747                 }
5748         }
5749         rcu_read_unlock();
5750
5751         return 0;
5752 }
5753
5754 static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
5755 {
5756         bool ok = scsi_debug_abort_cmnd(SCpnt);
5757         u8 *cmd = SCpnt->cmnd;
5758         u8 opcode = cmd[0];
5759
5760         ++num_aborts;
5761
5762         if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
5763                 sdev_printk(KERN_INFO, SCpnt->device,
5764                             "%s: command%s found\n", __func__,
5765                             ok ? "" : " not");
5766
5767         if (sdebug_fail_abort(SCpnt)) {
5768                 scmd_printk(KERN_INFO, SCpnt, "fail abort command 0x%x\n",
5769                             opcode);
5770                 return FAILED;
5771         }
5772
5773         return SUCCESS;
5774 }
5775
5776 static bool scsi_debug_stop_all_queued_iter(struct request *rq, void *data)
5777 {
5778         struct scsi_device *sdp = data;
5779         struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
5780
5781         if (scmd->device == sdp)
5782                 scsi_debug_abort_cmnd(scmd);
5783
5784         return true;
5785 }
5786
5787 /* Deletes (stops) timers or work queues of all queued commands per sdev */
5788 static void scsi_debug_stop_all_queued(struct scsi_device *sdp)
5789 {
5790         struct Scsi_Host *shost = sdp->host;
5791
5792         blk_mq_tagset_busy_iter(&shost->tag_set,
5793                                 scsi_debug_stop_all_queued_iter, sdp);
5794 }
5795
5796 static int sdebug_fail_lun_reset(struct scsi_cmnd *cmnd)
5797 {
5798         struct scsi_device *sdp = cmnd->device;
5799         struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
5800         struct sdebug_err_inject *err;
5801         unsigned char *cmd = cmnd->cmnd;
5802         int ret = 0;
5803
5804         if (devip == NULL)
5805                 return 0;
5806
5807         rcu_read_lock();
5808         list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
5809                 if (err->type == ERR_LUN_RESET_FAILED &&
5810                     (err->cmd == cmd[0] || err->cmd == 0xff)) {
5811                         ret = !!err->cnt;
5812                         if (err->cnt < 0)
5813                                 err->cnt++;
5814
5815                         rcu_read_unlock();
5816                         return ret;
5817                 }
5818         }
5819         rcu_read_unlock();
5820
5821         return 0;
5822 }
5823
/*
 * SCSI EH device (LUN) reset handler: cancel all deferred work for the
 * device, raise a power-on/reset unit attention, and honour any injected
 * LUN-reset failure.
 */
static int scsi_debug_device_reset(struct scsi_cmnd *SCpnt)
{
        struct scsi_device *sdp = SCpnt->device;
        struct sdebug_dev_info *devip = sdp->hostdata;
        u8 *cmd = SCpnt->cmnd;
        u8 opcode = cmd[0];

        ++num_dev_resets;

        if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
                sdev_printk(KERN_INFO, sdp, "%s\n", __func__);

        /* Cancel everything still queued for this device */
        scsi_debug_stop_all_queued(sdp);
        if (devip)
                set_bit(SDEBUG_UA_POR, devip->uas_bm);

        /* Report failure if error injection says so */
        if (sdebug_fail_lun_reset(SCpnt)) {
                scmd_printk(KERN_INFO, SCpnt, "fail lun reset 0x%x\n", opcode);
                return FAILED;
        }

        return SUCCESS;
}
5847
5848 static int sdebug_fail_target_reset(struct scsi_cmnd *cmnd)
5849 {
5850         struct scsi_target *starget = scsi_target(cmnd->device);
5851         struct sdebug_target_info *targetip =
5852                 (struct sdebug_target_info *)starget->hostdata;
5853
5854         if (targetip)
5855                 return targetip->reset_fail;
5856
5857         return 0;
5858 }
5859
/*
 * SCSI EH target reset handler: raise a bus-reset unit attention on every
 * LU of the target and honour any injected target-reset failure.
 */
static int scsi_debug_target_reset(struct scsi_cmnd *SCpnt)
{
        struct scsi_device *sdp = SCpnt->device;
        struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
        struct sdebug_dev_info *devip;
        u8 *cmd = SCpnt->cmnd;
        u8 opcode = cmd[0];
        int k = 0;      /* number of LUs found on the target */

        ++num_target_resets;
        if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
                sdev_printk(KERN_INFO, sdp, "%s\n", __func__);

        /* Flag the reset on every LU that belongs to this target */
        list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
                if (devip->target == sdp->id) {
                        set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
                        ++k;
                }
        }

        if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
                sdev_printk(KERN_INFO, sdp,
                            "%s: %d device(s) found in target\n", __func__, k);

        /* Report failure if error injection says so */
        if (sdebug_fail_target_reset(SCpnt)) {
                scmd_printk(KERN_INFO, SCpnt, "fail target reset 0x%x\n",
                            opcode);
                return FAILED;
        }

        return SUCCESS;
}
5892
/*
 * SCSI EH bus reset handler: raise a bus-reset unit attention on every LU
 * of this emulated host. Always reports SUCCESS.
 */
static int scsi_debug_bus_reset(struct scsi_cmnd *SCpnt)
{
        struct scsi_device *sdp = SCpnt->device;
        struct sdebug_host_info *sdbg_host = shost_to_sdebug_host(sdp->host);
        struct sdebug_dev_info *devip;
        int k = 0;      /* number of LUs found on the host */

        ++num_bus_resets;

        if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
                sdev_printk(KERN_INFO, sdp, "%s\n", __func__);

        /* Flag the reset on every LU of this host */
        list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
                set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
                ++k;
        }

        if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
                sdev_printk(KERN_INFO, sdp,
                            "%s: %d device(s) found in host\n", __func__, k);
        return SUCCESS;
}
5915
/*
 * Host reset handler: raise a bus-reset unit attention on every device of
 * every simulated host, then cancel all deferred commands. Always SUCCESS.
 */
static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
{
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *devip;
	int k = 0;	/* count of devices touched, for the noise printk */

	++num_host_resets;
	if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
	/* the mutex keeps the host list stable while we walk all hosts */
	mutex_lock(&sdebug_host_list_mutex);
	list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
		list_for_each_entry(devip, &sdbg_host->dev_info_list,
				    dev_list) {
			set_bit(SDEBUG_UA_BUS_RESET, devip->uas_bm);
			++k;
		}
	}
	mutex_unlock(&sdebug_host_list_mutex);
	stop_all_queued();	/* flush every queued/deferred response */
	if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
		sdev_printk(KERN_INFO, SCpnt->device,
			    "%s: %d device(s) found\n", __func__, k);
	return SUCCESS;
}
5940
/*
 * Write a DOS (MBR) partition table into the first block of the ramdisk
 * image so the simulated disk appears pre-partitioned. Partition starts
 * are aligned to cylinder boundaries of the fake geometry and every
 * partition is given the size of the smallest computed slot.
 */
static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
{
	struct msdos_partition *pp;
	int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
	int sectors_per_part, num_sectors, k;
	int heads_by_sects, start_sec, end_sec;

	/* assume partition table already zeroed */
	if ((sdebug_num_parts < 1) || (store_size < 1048576))
		return;	/* no partitions wanted, or store under 1 MiB */
	if (sdebug_num_parts > SDEBUG_MAX_PARTS) {
		sdebug_num_parts = SDEBUG_MAX_PARTS;
		pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
	}
	num_sectors = (int)get_sdebug_capacity();
	/* the first track (sdebug_sectors_per) is reserved for the MBR */
	sectors_per_part = (num_sectors - sdebug_sectors_per)
			   / sdebug_num_parts;
	heads_by_sects = sdebug_heads * sdebug_sectors_per;	/* sectors per cylinder */
	starts[0] = sdebug_sectors_per;
	max_part_secs = sectors_per_part;
	for (k = 1; k < sdebug_num_parts; ++k) {
		/* round each start down to a cylinder boundary */
		starts[k] = ((k * sectors_per_part) / heads_by_sects)
			    * heads_by_sects;
		/* remember the smallest inter-start gap: common size below */
		if (starts[k] - starts[k - 1] < max_part_secs)
			max_part_secs = starts[k] - starts[k - 1];
	}
	starts[sdebug_num_parts] = num_sectors;
	starts[sdebug_num_parts + 1] = 0;	/* sentinel terminates loop below */

	ramp[510] = 0x55;	/* magic partition markings */
	ramp[511] = 0xAA;
	pp = (struct msdos_partition *)(ramp + 0x1be);
	for (k = 0; starts[k + 1]; ++k, ++pp) {
		start_sec = starts[k];
		end_sec = starts[k] + max_part_secs - 1;
		pp->boot_ind = 0;

		/* legacy CHS address of the first sector */
		pp->cyl = start_sec / heads_by_sects;
		pp->head = (start_sec - (pp->cyl * heads_by_sects))
			   / sdebug_sectors_per;
		pp->sector = (start_sec % sdebug_sectors_per) + 1;

		/* legacy CHS address of the last sector */
		pp->end_cyl = end_sec / heads_by_sects;
		pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
			       / sdebug_sectors_per;
		pp->end_sector = (end_sec % sdebug_sectors_per) + 1;

		/* LBA start/length fields are little-endian on disk */
		pp->start_sect = cpu_to_le32(start_sec);
		pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
		pp->sys_ind = 0x83;	/* plain Linux partition */
	}
}
5993
5994 static void block_unblock_all_queues(bool block)
5995 {
5996         struct sdebug_host_info *sdhp;
5997
5998         lockdep_assert_held(&sdebug_host_list_mutex);
5999
6000         list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
6001                 struct Scsi_Host *shost = sdhp->shost;
6002
6003                 if (block)
6004                         scsi_block_requests(shost);
6005                 else
6006                         scsi_unblock_requests(shost);
6007         }
6008 }
6009
6010 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
6011  * commands will be processed normally before triggers occur.
6012  */
6013 static void tweak_cmnd_count(void)
6014 {
6015         int count, modulo;
6016
6017         modulo = abs(sdebug_every_nth);
6018         if (modulo < 2)
6019                 return;
6020
6021         mutex_lock(&sdebug_host_list_mutex);
6022         block_unblock_all_queues(true);
6023         count = atomic_read(&sdebug_cmnd_count);
6024         atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
6025         block_unblock_all_queues(false);
6026         mutex_unlock(&sdebug_host_list_mutex);
6027 }
6028
6029 static void clear_queue_stats(void)
6030 {
6031         atomic_set(&sdebug_cmnd_count, 0);
6032         atomic_set(&sdebug_completions, 0);
6033         atomic_set(&sdebug_miss_cpus, 0);
6034         atomic_set(&sdebug_a_tsf, 0);
6035 }
6036
6037 static bool inject_on_this_cmd(void)
6038 {
6039         if (sdebug_every_nth == 0)
6040                 return false;
6041         return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
6042 }
6043
6044 #define INCLUSIVE_TIMING_MAX_NS 1000000         /* 1 millisecond */
6045
6046
6047 void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
6048 {
6049         if (sqcp)
6050                 kmem_cache_free(queued_cmd_cache, sqcp);
6051 }
6052
/*
 * Allocate and initialize a tracker for a deferred SCSI command.
 * The completion hrtimer and work item are set up but NOT started;
 * schedule_resp() arms one of them later. Returns NULL when the slab
 * allocation fails (caller then reports SCSI_MLQUEUE_HOST_BUSY).
 */
static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
{
	struct sdebug_queued_cmd *sqcp;
	struct sdebug_defer *sd_dp;

	/* GFP_ATOMIC: may run in non-sleepable queuecommand context */
	sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
	if (!sqcp)
		return NULL;

	sd_dp = &sqcp->sd_dp;

	hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
	INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);

	sqcp->scmd = scmd;

	return sqcp;
}
6072
/* Complete the processing of the thread that queued a SCSI command to this
 * driver. It either completes the command by calling cmnd_done() or
 * schedules a hr timer or work queue then returns 0. Returns
 * SCSI_MLQUEUE_HOST_BUSY if temporarily out of resources.
 *
 * delta_jiff == 0: respond from the submitting thread (no deferral);
 * delta_jiff > 0 or ndelay > 0: defer completion via an hrtimer;
 * delta_jiff < 0: punt completion to a work queue.
 */
static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
			 int scsi_result,
			 int (*pfp)(struct scsi_cmnd *,
				    struct sdebug_dev_info *),
			 int delta_jiff, int ndelay)
{
	struct request *rq = scsi_cmd_to_rq(cmnd);
	bool polled = rq->cmd_flags & REQ_POLLED;
	struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
	unsigned long flags;
	u64 ns_from_boot = 0;
	struct sdebug_queued_cmd *sqcp;
	struct scsi_device *sdp;
	struct sdebug_defer *sd_dp;

	if (unlikely(devip == NULL)) {
		/* no device info: fail the command in the caller's thread */
		if (scsi_result == 0)
			scsi_result = DID_NO_CONNECT << 16;
		goto respond_in_thread;
	}
	sdp = cmnd->device;

	if (delta_jiff == 0)
		goto respond_in_thread;	/* immediate-response mode */


	/* optional TASK SET FULL injection once the queue is saturated */
	if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
		     (scsi_result == 0))) {
		int num_in_q = scsi_device_busy(sdp);
		int qdepth = cmnd->device->queue_depth;

		if ((num_in_q == qdepth) &&
		    (atomic_inc_return(&sdebug_a_tsf) >=
		     abs(sdebug_every_nth))) {
			atomic_set(&sdebug_a_tsf, 0);
			scsi_result = device_qfull_result;

			if (unlikely(SDEBUG_OPT_Q_NOISE & sdebug_opts))
				sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, <inject> status: TASK SET FULL\n",
					    __func__, num_in_q);
		}
	}

	/* deferred path needs a tracker for the timer / work item */
	sqcp = sdebug_alloc_queued_cmd(cmnd);
	if (!sqcp) {
		pr_err("%s no alloc\n", __func__);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	sd_dp = &sqcp->sd_dp;

	if (polled)
		ns_from_boot = ktime_get_boottime_ns();

	/* one of the resp_*() response functions is called here */
	cmnd->result = pfp ? pfp(cmnd, devip) : 0;
	if (cmnd->result & SDEG_RES_IMMED_MASK) {
		/* response function asked for an immediate completion */
		cmnd->result &= ~SDEG_RES_IMMED_MASK;
		delta_jiff = ndelay = 0;
	}
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
		/* one-shot transport error injection (sdeb_inject_pending) */
		if (atomic_read(&sdeb_inject_pending)) {
			mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
			atomic_set(&sdeb_inject_pending, 0);
			cmnd->result = check_condition_result;
		}
	}

	if (unlikely(sdebug_verbose && cmnd->result))
		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
			    __func__, cmnd->result);

	if (delta_jiff > 0 || ndelay > 0) {
		ktime_t kt;

		if (delta_jiff > 0) {
			u64 ns = jiffies_to_nsecs(delta_jiff);

			if (sdebug_random && ns < U32_MAX) {
				ns = get_random_u32_below((u32)ns);
			} else if (sdebug_random) {
				ns >>= 12;	/* scale to 4 usec precision */
				if (ns < U32_MAX)	/* over 4 hours max */
					ns = get_random_u32_below((u32)ns);
				ns <<= 12;
			}
			kt = ns_to_ktime(ns);
		} else {	/* ndelay has a 4.2 second max */
			kt = sdebug_random ? get_random_u32_below((u32)ndelay) :
					     (u32)ndelay;
			if (ndelay < INCLUSIVE_TIMING_MAX_NS) {
				u64 d = ktime_get_boottime_ns() - ns_from_boot;

				if (kt <= d) {	/* elapsed duration >= kt */
					/* call scsi_done() from this thread */
					sdebug_free_queued_cmd(sqcp);
					scsi_done(cmnd);
					return 0;
				}
				/* otherwise reduce kt by elapsed time */
				kt -= d;
			}
		}
		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		if (polled) {
			/* polled request: record completion time, no timer */
			spin_lock_irqsave(&sdsc->lock, flags);
			sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
			ASSIGN_QUEUED_CMD(cmnd, sqcp);
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
			spin_unlock_irqrestore(&sdsc->lock, flags);
		} else {
			/* schedule the invocation of scsi_done() for a later time */
			spin_lock_irqsave(&sdsc->lock, flags);
			ASSIGN_QUEUED_CMD(cmnd, sqcp);
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
			hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
			/*
			 * The completion handler will try to grab sqcp->lock,
			 * so there is no chance that the completion handler
			 * will call scsi_done() until we release the lock
			 * here (so ok to keep referencing sdsc).
			 */
			spin_unlock_irqrestore(&sdsc->lock, flags);
		}
	} else {	/* jdelay < 0, use work queue */
		if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
			     atomic_read(&sdeb_inject_pending))) {
			/* one-shot command-abort injection */
			sd_dp->aborted = true;
			atomic_set(&sdeb_inject_pending, 0);
			sdev_printk(KERN_INFO, sdp, "abort request tag=%#x\n",
				    blk_mq_unique_tag_to_tag(get_tag(cmnd)));
		}

		if (sdebug_statistics)
			sd_dp->issuing_cpu = raw_smp_processor_id();
		if (polled) {
			spin_lock_irqsave(&sdsc->lock, flags);
			ASSIGN_QUEUED_CMD(cmnd, sqcp);
			sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
			spin_unlock_irqrestore(&sdsc->lock, flags);
		} else {
			spin_lock_irqsave(&sdsc->lock, flags);
			ASSIGN_QUEUED_CMD(cmnd, sqcp);
			WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
			schedule_work(&sd_dp->ew.work);
			spin_unlock_irqrestore(&sdsc->lock, flags);
		}
	}

	return 0;

respond_in_thread:	/* call back to mid-layer using invocation thread */
	cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
	cmnd->result &= ~SDEG_RES_IMMED_MASK;
	if (cmnd->result == 0 && scsi_result != 0)
		cmnd->result = scsi_result;
	scsi_done(cmnd);
	return 0;
}
6240
/* Note: The following macros create attribute files in the
   /sys/module/scsi_debug/parameters directory. Unfortunately this
   driver is unaware of a change and cannot trigger auxiliary actions
   as it can when the corresponding attribute in the
   /sys/bus/pseudo/drivers/scsi_debug directory is changed.
 */
/* Permissions: S_IRUGO params are fixed at load time; S_IRUGO | S_IWUSR
 * (or 0644) params may also be changed at runtime by root via sysfs. */
module_param_named(add_host, sdebug_add_host, int, S_IRUGO | S_IWUSR);
module_param_named(ato, sdebug_ato, int, S_IRUGO);
module_param_named(cdb_len, sdebug_cdb_len, int, 0644);
module_param_named(clustering, sdebug_clustering, bool, S_IRUGO | S_IWUSR);
module_param_named(delay, sdebug_jdelay, int, S_IRUGO | S_IWUSR);
module_param_named(dev_size_mb, sdebug_dev_size_mb, int, S_IRUGO);
module_param_named(dif, sdebug_dif, int, S_IRUGO);
module_param_named(dix, sdebug_dix, int, S_IRUGO);
module_param_named(dsense, sdebug_dsense, int, S_IRUGO | S_IWUSR);
module_param_named(every_nth, sdebug_every_nth, int, S_IRUGO | S_IWUSR);
module_param_named(fake_rw, sdebug_fake_rw, int, S_IRUGO | S_IWUSR);
module_param_named(guard, sdebug_guard, uint, S_IRUGO);
module_param_named(host_lock, sdebug_host_lock, bool, S_IRUGO | S_IWUSR);
module_param_named(host_max_queue, sdebug_host_max_queue, int, S_IRUGO);
module_param_string(inq_product, sdebug_inq_product_id,
		    sizeof(sdebug_inq_product_id), S_IRUGO | S_IWUSR);
module_param_string(inq_rev, sdebug_inq_product_rev,
		    sizeof(sdebug_inq_product_rev), S_IRUGO | S_IWUSR);
module_param_string(inq_vendor, sdebug_inq_vendor_id,
		    sizeof(sdebug_inq_vendor_id), S_IRUGO | S_IWUSR);
module_param_named(lbprz, sdebug_lbprz, int, S_IRUGO);
module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_count, sdebug_medium_error_count, int,
		   S_IRUGO | S_IWUSR);
module_param_named(medium_error_start, sdebug_medium_error_start, int,
		   S_IRUGO | S_IWUSR);
module_param_named(ndelay, sdebug_ndelay, int, S_IRUGO | S_IWUSR);
module_param_named(no_lun_0, sdebug_no_lun_0, int, S_IRUGO | S_IWUSR);
module_param_named(no_rwlock, sdebug_no_rwlock, bool, S_IRUGO | S_IWUSR);
module_param_named(no_uld, sdebug_no_uld, int, S_IRUGO);
module_param_named(num_parts, sdebug_num_parts, int, S_IRUGO);
module_param_named(num_tgts, sdebug_num_tgts, int, S_IRUGO | S_IWUSR);
module_param_named(opt_blks, sdebug_opt_blks, int, S_IRUGO);
module_param_named(opt_xferlen_exp, sdebug_opt_xferlen_exp, int, S_IRUGO);
module_param_named(opts, sdebug_opts, int, S_IRUGO | S_IWUSR);
module_param_named(per_host_store, sdebug_per_host_store, bool,
		   S_IRUGO | S_IWUSR);
module_param_named(physblk_exp, sdebug_physblk_exp, int, S_IRUGO);
module_param_named(ptype, sdebug_ptype, int, S_IRUGO | S_IWUSR);
module_param_named(random, sdebug_random, bool, S_IRUGO | S_IWUSR);
module_param_named(removable, sdebug_removable, bool, S_IRUGO | S_IWUSR);
module_param_named(scsi_level, sdebug_scsi_level, int, S_IRUGO);
module_param_named(sector_size, sdebug_sector_size, int, S_IRUGO);
module_param_named(statistics, sdebug_statistics, bool, S_IRUGO | S_IWUSR);
module_param_named(strict, sdebug_strict, bool, S_IRUGO | S_IWUSR);
module_param_named(submit_queues, submit_queues, int, S_IRUGO);
module_param_named(poll_queues, poll_queues, int, S_IRUGO);
module_param_named(tur_ms_to_ready, sdeb_tur_ms_to_ready, int, S_IRUGO);
module_param_named(unmap_alignment, sdebug_unmap_alignment, int, S_IRUGO);
module_param_named(unmap_granularity, sdebug_unmap_granularity, int, S_IRUGO);
module_param_named(unmap_max_blocks, sdebug_unmap_max_blocks, int, S_IRUGO);
module_param_named(unmap_max_desc, sdebug_unmap_max_desc, int, S_IRUGO);
module_param_named(uuid_ctl, sdebug_uuid_ctl, int, S_IRUGO);
module_param_named(virtual_gb, sdebug_virtual_gb, int, S_IRUGO | S_IWUSR);
module_param_named(vpd_use_hostno, sdebug_vpd_use_hostno, int,
		   S_IRUGO | S_IWUSR);
module_param_named(wp, sdebug_wp, bool, S_IRUGO | S_IWUSR);
module_param_named(write_same_length, sdebug_write_same_length, int,
		   S_IRUGO | S_IWUSR);
module_param_named(zbc, sdeb_zbc_model_s, charp, S_IRUGO);
module_param_named(zone_cap_mb, sdeb_zbc_zone_cap_mb, int, S_IRUGO);
module_param_named(zone_max_open, sdeb_zbc_max_open, int, S_IRUGO);
module_param_named(zone_nr_conv, sdeb_zbc_nr_conv, int, S_IRUGO);
module_param_named(zone_size_mb, sdeb_zbc_zone_size_mb, int, S_IRUGO);
module_param_named(allow_restart, sdebug_allow_restart, bool, S_IRUGO | S_IWUSR);

MODULE_AUTHOR("Eric Youngdale + Douglas Gilbert");
MODULE_DESCRIPTION("SCSI debug adapter driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SDEBUG_VERSION);
6323
6324 MODULE_PARM_DESC(add_host, "add n hosts, in sysfs if negative remove host(s) (def=1)");
6325 MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)");
6326 MODULE_PARM_DESC(cdb_len, "suggest CDB lengths to drivers (def=10)");
6327 MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");
6328 MODULE_PARM_DESC(delay, "response delay (def=1 jiffy); 0:imm, -1,-2:tiny");
6329 MODULE_PARM_DESC(dev_size_mb, "size in MiB of ram shared by devs(def=8)");
6330 MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)");
6331 MODULE_PARM_DESC(dix, "data integrity extensions mask (def=0)");
6332 MODULE_PARM_DESC(dsense, "use descriptor sense format(def=0 -> fixed)");
6333 MODULE_PARM_DESC(every_nth, "timeout every nth command(def=0)");
6334 MODULE_PARM_DESC(fake_rw, "fake reads/writes instead of copying (def=0)");
6335 MODULE_PARM_DESC(guard, "protection checksum: 0=crc, 1=ip (def=0)");
6336 MODULE_PARM_DESC(host_lock, "host_lock is ignored (def=0)");
6337 MODULE_PARM_DESC(host_max_queue,
6338                  "host max # of queued cmds (0 to max(def) [max_queue fixed equal for !0])");
6339 MODULE_PARM_DESC(inq_product, "SCSI INQUIRY product string (def=\"scsi_debug\")");
6340 MODULE_PARM_DESC(inq_rev, "SCSI INQUIRY revision string (def=\""
6341                  SDEBUG_VERSION "\")");
6342 MODULE_PARM_DESC(inq_vendor, "SCSI INQUIRY vendor string (def=\"Linux\")");
6343 MODULE_PARM_DESC(lbprz,
6344                  "on read unmapped LBs return 0 when 1 (def), return 0xff when 2");
6345 MODULE_PARM_DESC(lbpu, "enable LBP, support UNMAP command (def=0)");
6346 MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=0)");
6347 MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
6348 MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
6349 MODULE_PARM_DESC(lun_format, "LUN format: 0->peripheral (def); 1 --> flat address method");
6350 MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate(def=1)");
6351 MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
6352 MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
6353 MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
6354 MODULE_PARM_DESC(ndelay, "response delay in nanoseconds (def=0 -> ignore)");
6355 MODULE_PARM_DESC(no_lun_0, "no LU number 0 (def=0 -> have lun 0)");
6356 MODULE_PARM_DESC(no_rwlock, "don't protect user data reads+writes (def=0)");
6357 MODULE_PARM_DESC(no_uld, "stop ULD (e.g. sd driver) attaching (def=0))");
6358 MODULE_PARM_DESC(num_parts, "number of partitions(def=0)");
6359 MODULE_PARM_DESC(num_tgts, "number of targets per host to simulate(def=1)");
6360 MODULE_PARM_DESC(opt_blks, "optimal transfer length in blocks (def=1024)");
6361 MODULE_PARM_DESC(opt_xferlen_exp, "optimal transfer length granularity exponent (def=physblk_exp)");
6362 MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err... (def=0)");
6363 MODULE_PARM_DESC(per_host_store, "If set, next positive add_host will get new store (def=0)");
6364 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
6365 MODULE_PARM_DESC(poll_queues, "support for iouring iopoll queues (1 to max(submit_queues - 1))");
6366 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
6367 MODULE_PARM_DESC(random, "If set, uniformly randomize command duration between 0 and delay_in_ns");
6368 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
6369 MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=7[SPC-5])");
6370 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
6371 MODULE_PARM_DESC(statistics, "collect statistics on commands, queues (def=0)");
6372 MODULE_PARM_DESC(strict, "stricter checks: reserved field in cdb (def=0)");
6373 MODULE_PARM_DESC(submit_queues, "support for block multi-queue (def=1)");
6374 MODULE_PARM_DESC(tur_ms_to_ready, "TEST UNIT READY millisecs before initial good status (def=0)");
6375 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
6376 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
6377 MODULE_PARM_DESC(unmap_max_blocks, "max # of blocks can be unmapped in one cmd (def=0xffffffff)");
6378 MODULE_PARM_DESC(unmap_max_desc, "max # of ranges that can be unmapped in one cmd (def=256)");
6379 MODULE_PARM_DESC(uuid_ctl,
6380                  "1->use uuid for lu name, 0->don't, 2->all use same (def=0)");
6381 MODULE_PARM_DESC(virtual_gb, "virtual gigabyte (GiB) size (def=0 -> use dev_size_mb)");
6382 MODULE_PARM_DESC(vpd_use_hostno, "0 -> dev ids ignore hostno (def=1 -> unique dev ids)");
6383 MODULE_PARM_DESC(wp, "Write Protect (def=0)");
6384 MODULE_PARM_DESC(write_same_length, "Maximum blocks per WRITE SAME cmd (def=0xffff)");
6385 MODULE_PARM_DESC(zbc, "'none' [0]; 'aware' [1]; 'managed' [2] (def=0). Can have 'host-' prefix");
6386 MODULE_PARM_DESC(zone_cap_mb, "Zone capacity in MiB (def=zone size)");
6387 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones; [0] for no limit (def=auto)");
6388 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones (def=1)");
6389 MODULE_PARM_DESC(zone_size_mb, "Zone size in MiB (def=auto)");
6390 MODULE_PARM_DESC(allow_restart, "Set scsi_device's allow_restart flag(def=0)");
6391
#define SDEBUG_INFO_LEN 256
static char sdebug_info[SDEBUG_INFO_LEN];	/* rebuilt on every ->info() call */

/*
 * Scsi_Host ->info() hook: return a description string for this adapter.
 * NOTE(review): writes into a single shared static buffer, so concurrent
 * callers would race -- presumably the midlayer serializes this; confirm.
 */
static const char *scsi_debug_info(struct Scsi_Host *shp)
{
	int k;

	k = scnprintf(sdebug_info, SDEBUG_INFO_LEN, "%s: version %s [%s]\n",
		      my_name, SDEBUG_VERSION, sdebug_version_date);
	if (k >= (SDEBUG_INFO_LEN - 1))	/* first line already filled buffer */
		return sdebug_info;
	scnprintf(sdebug_info + k, SDEBUG_INFO_LEN - k,
		  "  dev_size_mb=%d, opts=0x%x, submit_queues=%d, %s=%d",
		  sdebug_dev_size_mb, sdebug_opts, submit_queues,
		  "statistics", (int)sdebug_statistics);
	return sdebug_info;
}
6409
6410 /* 'echo <val> > /proc/scsi/scsi_debug/<host_id>' writes to opts */
6411 static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
6412                                  int length)
6413 {
6414         char arr[16];
6415         int opts;
6416         int minLen = length > 15 ? 15 : length;
6417
6418         if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
6419                 return -EACCES;
6420         memcpy(arr, buffer, minLen);
6421         arr[minLen] = '\0';
6422         if (1 != sscanf(arr, "%d", &opts))
6423                 return -EINVAL;
6424         sdebug_opts = opts;
6425         sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
6426         sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
6427         if (sdebug_every_nth != 0)
6428                 tweak_cmnd_count();
6429         return length;
6430 }
6431
/* Per-hwq state carried through the busy-tag iteration below. */
struct sdebug_submit_queue_data {
	int *first;	/* lowest busy tag seen so far (-1 when none) */
	int *last;	/* highest busy tag seen so far */
	int queue_num;	/* hardware queue being summarized */
};

/* blk_mq_tagset_busy_iter() callback: record first/last busy tag of a hwq. */
static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
{
	struct sdebug_submit_queue_data *data = opaque;
	u32 unique_tag = blk_mq_unique_tag(rq);
	u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
	u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
	int queue_num = data->queue_num;

	/* skip requests that belong to a different hardware queue */
	if (hwq != queue_num)
		return true;

	/* Rely on iter'ing in ascending tag order */
	if (*data->first == -1)
		*data->first = *data->last = tag;
	else
		*data->last = tag;

	return true;	/* keep iterating */
}
6457
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
 * same for each scsi_debug host (if more than one). Some of the counters
 * output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct sdebug_host_info *sdhp;
	int j;

	/* static configuration and global counters */
	seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
		   SDEBUG_VERSION, sdebug_version_date);
	seq_printf(m, "num_tgts=%d, %ssize=%d MB, opts=0x%x, every_nth=%d\n",
		   sdebug_num_tgts, "shared (ram) ", sdebug_dev_size_mb,
		   sdebug_opts, sdebug_every_nth);
	seq_printf(m, "delay=%d, ndelay=%d, max_luns=%d, sector_size=%d %s\n",
		   sdebug_jdelay, sdebug_ndelay, sdebug_max_luns,
		   sdebug_sector_size, "bytes");
	seq_printf(m, "cylinders=%d, heads=%d, sectors=%d, command aborts=%d\n",
		   sdebug_cylinders_per, sdebug_heads, sdebug_sectors_per,
		   num_aborts);
	seq_printf(m, "RESETs: device=%d, target=%d, bus=%d, host=%d\n",
		   num_dev_resets, num_target_resets, num_bus_resets,
		   num_host_resets);
	seq_printf(m, "dix_reads=%d, dix_writes=%d, dif_errors=%d\n",
		   dix_reads, dix_writes, dif_errors);
	seq_printf(m, "usec_in_jiffy=%lu, statistics=%d\n", TICK_NSEC / 1000,
		   sdebug_statistics);
	seq_printf(m, "cmnd_count=%d, completions=%d, %s=%d, a_tsf=%d, mq_polls=%d\n",
		   atomic_read(&sdebug_cmnd_count),
		   atomic_read(&sdebug_completions),
		   "miss_cpus", atomic_read(&sdebug_miss_cpus),
		   atomic_read(&sdebug_a_tsf),
		   atomic_read(&sdeb_mq_poll_count));

	/* per-hardware-queue busy tag summary (first/last busy tag) */
	seq_printf(m, "submit_queues=%d\n", submit_queues);
	for (j = 0; j < submit_queues; ++j) {
		int f = -1, l = -1;
		struct sdebug_submit_queue_data data = {
			.queue_num = j,
			.first = &f,
			.last = &l,
		};
		seq_printf(m, "  queue %d:\n", j);
		blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
					&data);
		if (f >= 0) {
			seq_printf(m, "    in_use_bm BUSY: %s: %d,%d\n",
				   "first,last bits", f, l);
		}
	}

	seq_printf(m, "this host_no=%d\n", host->host_no);
	/* when per-host stores exist, list hosts and their backing stores */
	if (!xa_empty(per_store_ap)) {
		bool niu;
		int idx;
		unsigned long l_idx;
		struct sdeb_store_info *sip;

		seq_puts(m, "\nhost list:\n");
		j = 0;
		list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
			idx = sdhp->si_idx;
			seq_printf(m, "  %d: host_no=%d, si_idx=%d\n", j,
				   sdhp->shost->host_no, idx);
			++j;
		}
		seq_printf(m, "\nper_store array [most_recent_idx=%d]:\n",
			   sdeb_most_recent_idx);
		j = 0;
		xa_for_each(per_store_ap, l_idx, sip) {
			niu = xa_get_mark(per_store_ap, l_idx,
					  SDEB_XA_NOT_IN_USE);
			idx = (int)l_idx;
			seq_printf(m, "  %d: idx=%d%s\n", j, idx,
				   (niu ? "  not_in_use" : ""));
			++j;
		}
	}
	return 0;
}
6537
6538 static ssize_t delay_show(struct device_driver *ddp, char *buf)
6539 {
6540         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_jdelay);
6541 }
/* Returns -EBUSY if jdelay is being changed and commands are queued. The unit
 * of delay is jiffies.
 */
static ssize_t delay_store(struct device_driver *ddp, const char *buf,
			   size_t count)
{
	int jdelay, res;

	if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
		res = count;
		if (sdebug_jdelay != jdelay) {
			struct sdebug_host_info *sdhp;

			/* Freeze queue processing while checking for in-flight commands */
			mutex_lock(&sdebug_host_list_mutex);
			block_unblock_all_queues(true);

			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
				struct Scsi_Host *shost = sdhp->shost;

				if (scsi_host_busy(shost)) {
					res = -EBUSY;   /* queued commands */
					break;
				}
			}
			if (res > 0) {
				/* All hosts idle: apply jiffies delay; ndelay gives way */
				sdebug_jdelay = jdelay;
				sdebug_ndelay = 0;
			}
			block_unblock_all_queues(false);
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(delay);
6578
6579 static ssize_t ndelay_show(struct device_driver *ddp, char *buf)
6580 {
6581         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ndelay);
6582 }
/* Returns -EBUSY if ndelay is being changed and commands are queued */
/* If > 0 and accepted then sdebug_jdelay is set to JDELAY_OVERRIDDEN */
static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
			    size_t count)
{
	int ndelay, res;

	/* accept 0 <= ndelay < one second's worth of nanoseconds */
	if ((count > 0) && (1 == sscanf(buf, "%d", &ndelay)) &&
	    (ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
		res = count;
		if (sdebug_ndelay != ndelay) {
			struct sdebug_host_info *sdhp;

			/* Freeze queue processing while checking for in-flight commands */
			mutex_lock(&sdebug_host_list_mutex);
			block_unblock_all_queues(true);

			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
				struct Scsi_Host *shost = sdhp->shost;

				if (scsi_host_busy(shost)) {
					res = -EBUSY;   /* queued commands */
					break;
				}
			}

			if (res > 0) {
				sdebug_ndelay = ndelay;
				/* a non-zero ndelay overrides the jiffies delay */
				sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
							: DEF_JDELAY;
			}
			block_unblock_all_queues(false);
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return res;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(ndelay);
6621
6622 static ssize_t opts_show(struct device_driver *ddp, char *buf)
6623 {
6624         return scnprintf(buf, PAGE_SIZE, "0x%x\n", sdebug_opts);
6625 }
6626
/* Accepts either a "0x" prefixed hex value or a plain decimal value. */
static ssize_t opts_store(struct device_driver *ddp, const char *buf,
			  size_t count)
{
	int opts;
	char work[20];

	/* %10s bounds the copy; work[] has room for 10 chars plus NUL */
	if (sscanf(buf, "%10s", work) == 1) {
		if (strncasecmp(work, "0x", 2) == 0) {
			if (kstrtoint(work + 2, 16, &opts) == 0)
				goto opts_done;
		} else {
			if (kstrtoint(work, 10, &opts) == 0)
				goto opts_done;
		}
	}
	return -EINVAL;
opts_done:
	sdebug_opts = opts;
	/* cache the flag bits that are consulted in fast paths */
	sdebug_verbose = !!(SDEBUG_OPT_NOISE & opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & opts);
	tweak_cmnd_count();
	return count;
}
static DRIVER_ATTR_RW(opts);
6651
6652 static ssize_t ptype_show(struct device_driver *ddp, char *buf)
6653 {
6654         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ptype);
6655 }
6656 static ssize_t ptype_store(struct device_driver *ddp, const char *buf,
6657                            size_t count)
6658 {
6659         int n;
6660
6661         /* Cannot change from or to TYPE_ZBC with sysfs */
6662         if (sdebug_ptype == TYPE_ZBC)
6663                 return -EINVAL;
6664
6665         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6666                 if (n == TYPE_ZBC)
6667                         return -EINVAL;
6668                 sdebug_ptype = n;
6669                 return count;
6670         }
6671         return -EINVAL;
6672 }
6673 static DRIVER_ATTR_RW(ptype);
6674
6675 static ssize_t dsense_show(struct device_driver *ddp, char *buf)
6676 {
6677         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dsense);
6678 }
6679 static ssize_t dsense_store(struct device_driver *ddp, const char *buf,
6680                             size_t count)
6681 {
6682         int n;
6683
6684         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6685                 sdebug_dsense = n;
6686                 return count;
6687         }
6688         return -EINVAL;
6689 }
6690 static DRIVER_ATTR_RW(dsense);
6691
6692 static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)
6693 {
6694         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_fake_rw);
6695 }
/*
 * Toggle fake (store-less) reads/writes. Transitioning 1 --> 0 sets up (or
 * re-uses) a backing store shared by all hosts; transitioning 0 --> 1 erases
 * all stores apart from the first.
 */
static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int n, idx;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		bool want_store = (n == 0);
		struct sdebug_host_info *sdhp;

		/* normalize both values to 0/1 before comparing */
		n = (n > 0);
		sdebug_fake_rw = (sdebug_fake_rw > 0);
		if (sdebug_fake_rw == n)
			return count;	/* not transitioning so do nothing */

		if (want_store) {	/* 1 --> 0 transition, set up store */
			if (sdeb_first_idx < 0) {
				idx = sdebug_add_store();
				if (idx < 0)
					return idx;
			} else {
				/* re-activate the first store */
				idx = sdeb_first_idx;
				xa_clear_mark(per_store_ap, idx,
					      SDEB_XA_NOT_IN_USE);
			}
			/* make all hosts use same store */
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				if (sdhp->si_idx != idx) {
					/* retire the host's previous store */
					xa_set_mark(per_store_ap, sdhp->si_idx,
						    SDEB_XA_NOT_IN_USE);
					sdhp->si_idx = idx;
				}
			}
			sdeb_most_recent_idx = idx;
		} else {	/* 0 --> 1 transition is trigger for shrink */
			sdebug_erase_all_stores(true /* apart from first */);
		}
		sdebug_fake_rw = n;
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(fake_rw);
6739
6740 static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)
6741 {
6742         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_lun_0);
6743 }
6744 static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf,
6745                               size_t count)
6746 {
6747         int n;
6748
6749         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6750                 sdebug_no_lun_0 = n;
6751                 return count;
6752         }
6753         return -EINVAL;
6754 }
6755 static DRIVER_ATTR_RW(no_lun_0);
6756
6757 static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)
6758 {
6759         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_tgts);
6760 }
6761 static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf,
6762                               size_t count)
6763 {
6764         int n;
6765
6766         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
6767                 sdebug_num_tgts = n;
6768                 sdebug_max_tgts_luns();
6769                 return count;
6770         }
6771         return -EINVAL;
6772 }
6773 static DRIVER_ATTR_RW(num_tgts);
6774
6775 static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)
6776 {
6777         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dev_size_mb);
6778 }
6779 static DRIVER_ATTR_RO(dev_size_mb);
6780
6781 static ssize_t per_host_store_show(struct device_driver *ddp, char *buf)
6782 {
6783         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_per_host_store);
6784 }
6785
6786 static ssize_t per_host_store_store(struct device_driver *ddp, const char *buf,
6787                                     size_t count)
6788 {
6789         bool v;
6790
6791         if (kstrtobool(buf, &v))
6792                 return -EINVAL;
6793
6794         sdebug_per_host_store = v;
6795         return count;
6796 }
6797 static DRIVER_ATTR_RW(per_host_store);
6798
6799 static ssize_t num_parts_show(struct device_driver *ddp, char *buf)
6800 {
6801         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_parts);
6802 }
6803 static DRIVER_ATTR_RO(num_parts);
6804
6805 static ssize_t every_nth_show(struct device_driver *ddp, char *buf)
6806 {
6807         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_every_nth);
6808 }
/* Accepts either a "0x" prefixed hex value or a plain decimal value. */
static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
			       size_t count)
{
	int nth;
	char work[20];

	/* %10s bounds the copy; work[] has room for 10 chars plus NUL */
	if (sscanf(buf, "%10s", work) == 1) {
		if (strncasecmp(work, "0x", 2) == 0) {
			if (kstrtoint(work + 2, 16, &nth) == 0)
				goto every_nth_done;
		} else {
			if (kstrtoint(work, 10, &nth) == 0)
				goto every_nth_done;
		}
	}
	return -EINVAL;

every_nth_done:
	sdebug_every_nth = nth;
	/* the every_nth machinery counts commands, which requires statistics */
	if (nth && !sdebug_statistics) {
		pr_info("every_nth needs statistics=1, set it\n");
		sdebug_statistics = true;
	}
	tweak_cmnd_count();
	return count;
}
static DRIVER_ATTR_RW(every_nth);
6836
6837 static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
6838 {
6839         return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
6840 }
/*
 * Set the LUN address method (0 = peripheral, 1 = flat). On a change, post a
 * LUNS CHANGED unit attention to every device when the SCSI level supports it.
 */
static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	if (kstrtoint(buf, 0, &n))
		return -EINVAL;
	if (n >= 0) {
		if (n > (int)SAM_LUN_AM_FLAT) {
			pr_warn("only LUN address methods 0 and 1 are supported\n");
			return -EINVAL;
		}
		changed = ((int)sdebug_lun_am != n);
		sdebug_lun_am = n;
		if (changed && sdebug_scsi_level >= 5) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			/* flag the unit attention on every device of every host */
			mutex_lock(&sdebug_host_list_mutex);
			list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
				}
			}
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(lun_format);
6873
6874 static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
6875 {
6876         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
6877 }
/*
 * Set the maximum number of LUNs per target (capped at 256). On a change,
 * post a LUNS CHANGED unit attention when the SCSI level supports it.
 */
static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	int n;
	bool changed;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		if (n > 256) {
			pr_warn("max_luns can be no more than 256\n");
			return -EINVAL;
		}
		changed = (sdebug_max_luns != n);
		sdebug_max_luns = n;
		sdebug_max_tgts_luns();
		if (changed && (sdebug_scsi_level >= 5)) {	/* >= SPC-3 */
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			/* flag the unit attention on every device of every host */
			mutex_lock(&sdebug_host_list_mutex);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_LUNS_CHANGED,
						dp->uas_bm);
				}
			}
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(max_luns);
6912
6913 static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
6914 {
6915         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_queue);
6916 }
6917 /* N.B. max_queue can be changed while there are queued commands. In flight
6918  * commands beyond the new max_queue will be completed. */
6919 static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
6920                                size_t count)
6921 {
6922         int n;
6923
6924         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
6925             (n <= SDEBUG_CANQUEUE) &&
6926             (sdebug_host_max_queue == 0)) {
6927                 mutex_lock(&sdebug_host_list_mutex);
6928
6929                 /* We may only change sdebug_max_queue when we have no shosts */
6930                 if (list_empty(&sdebug_host_list))
6931                         sdebug_max_queue = n;
6932                 else
6933                         count = -EBUSY;
6934                 mutex_unlock(&sdebug_host_list_mutex);
6935                 return count;
6936         }
6937         return -EINVAL;
6938 }
6939 static DRIVER_ATTR_RW(max_queue);
6940
6941 static ssize_t host_max_queue_show(struct device_driver *ddp, char *buf)
6942 {
6943         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_host_max_queue);
6944 }
6945
6946 static ssize_t no_rwlock_show(struct device_driver *ddp, char *buf)
6947 {
6948         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_rwlock);
6949 }
6950
6951 static ssize_t no_rwlock_store(struct device_driver *ddp, const char *buf, size_t count)
6952 {
6953         bool v;
6954
6955         if (kstrtobool(buf, &v))
6956                 return -EINVAL;
6957
6958         sdebug_no_rwlock = v;
6959         return count;
6960 }
6961 static DRIVER_ATTR_RW(no_rwlock);
6962
6963 /*
6964  * Since this is used for .can_queue, and we get the hc_idx tag from the bitmap
6965  * in range [0, sdebug_host_max_queue), we can't change it.
6966  */
6967 static DRIVER_ATTR_RO(host_max_queue);
6968
6969 static ssize_t no_uld_show(struct device_driver *ddp, char *buf)
6970 {
6971         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_no_uld);
6972 }
6973 static DRIVER_ATTR_RO(no_uld);
6974
6975 static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)
6976 {
6977         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_scsi_level);
6978 }
6979 static DRIVER_ATTR_RO(scsi_level);
6980
6981 static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)
6982 {
6983         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_virtual_gb);
6984 }
/*
 * Set the virtual capacity in gibibytes and recompute the reported capacity.
 * On a change, post a CAPACITY CHANGED unit attention on every device.
 */
static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
				size_t count)
{
	int n;
	bool changed;

	/* Ignore capacity change for ZBC drives for now */
	/* NOTE(review): -ENOTSUPP is not a userspace errno; -EOPNOTSUPP may be
	 * preferable here - confirm before changing the user-visible value. */
	if (sdeb_zbc_in_use)
		return -ENOTSUPP;

	if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
		changed = (sdebug_virtual_gb != n);
		sdebug_virtual_gb = n;
		sdebug_capacity = get_sdebug_capacity();
		if (changed) {
			struct sdebug_host_info *sdhp;
			struct sdebug_dev_info *dp;

			/* flag the unit attention on every device of every host */
			mutex_lock(&sdebug_host_list_mutex);
			list_for_each_entry(sdhp, &sdebug_host_list,
					    host_list) {
				list_for_each_entry(dp, &sdhp->dev_info_list,
						    dev_list) {
					set_bit(SDEBUG_UA_CAPACITY_CHANGED,
						dp->uas_bm);
				}
			}
			mutex_unlock(&sdebug_host_list_mutex);
		}
		return count;
	}
	return -EINVAL;
}
static DRIVER_ATTR_RW(virtual_gb);
7019
7020 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
7021 {
7022         /* absolute number of hosts currently active is what is shown */
7023         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
7024 }
7025
/*
 * Add (positive value) or remove (negative value) that many pseudo hosts.
 * When per-host stores are wanted, prefer re-using a store that has been
 * marked not-in-use before allocating a fresh one.
 */
static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
			      size_t count)
{
	bool found;
	unsigned long idx;
	struct sdeb_store_info *sip;
	bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
	int delta_hosts;

	if (sscanf(buf, "%d", &delta_hosts) != 1)
		return -EINVAL;
	if (delta_hosts > 0) {
		do {
			found = false;
			if (want_phs) {
				/* scan for a retired store to re-use */
				xa_for_each_marked(per_store_ap, idx, sip,
						   SDEB_XA_NOT_IN_USE) {
					sdeb_most_recent_idx = (int)idx;
					found = true;
					break;
				}
				if (found)	/* re-use case */
					sdebug_add_host_helper((int)idx);
				else
					sdebug_do_add_host(true);
			} else {
				sdebug_do_add_host(false);
			}
		} while (--delta_hosts);
	} else if (delta_hosts < 0) {
		do {
			sdebug_do_remove_host(false);
		} while (++delta_hosts);
	}
	return count;
}
static DRIVER_ATTR_RW(add_host);
7063
7064 static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)
7065 {
7066         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_vpd_use_hostno);
7067 }
7068 static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf,
7069                                     size_t count)
7070 {
7071         int n;
7072
7073         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7074                 sdebug_vpd_use_hostno = n;
7075                 return count;
7076         }
7077         return -EINVAL;
7078 }
7079 static DRIVER_ATTR_RW(vpd_use_hostno);
7080
7081 static ssize_t statistics_show(struct device_driver *ddp, char *buf)
7082 {
7083         return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_statistics);
7084 }
7085 static ssize_t statistics_store(struct device_driver *ddp, const char *buf,
7086                                 size_t count)
7087 {
7088         int n;
7089
7090         if ((count > 0) && (sscanf(buf, "%d", &n) == 1) && (n >= 0)) {
7091                 if (n > 0)
7092                         sdebug_statistics = true;
7093                 else {
7094                         clear_queue_stats();
7095                         sdebug_statistics = false;
7096                 }
7097                 return count;
7098         }
7099         return -EINVAL;
7100 }
7101 static DRIVER_ATTR_RW(statistics);
7102
7103 static ssize_t sector_size_show(struct device_driver *ddp, char *buf)
7104 {
7105         return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_sector_size);
7106 }
7107 static DRIVER_ATTR_RO(sector_size);
7108
7109 static ssize_t submit_queues_show(struct device_driver *ddp, char *buf)
7110 {
7111         return scnprintf(buf, PAGE_SIZE, "%d\n", submit_queues);
7112 }
7113 static DRIVER_ATTR_RO(submit_queues);
7114
7115 static ssize_t dix_show(struct device_driver *ddp, char *buf)
7116 {
7117         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dix);
7118 }
7119 static DRIVER_ATTR_RO(dix);
7120
7121 static ssize_t dif_show(struct device_driver *ddp, char *buf)
7122 {
7123         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_dif);
7124 }
7125 static DRIVER_ATTR_RO(dif);
7126
7127 static ssize_t guard_show(struct device_driver *ddp, char *buf)
7128 {
7129         return scnprintf(buf, PAGE_SIZE, "%u\n", sdebug_guard);
7130 }
7131 static DRIVER_ATTR_RO(guard);
7132
7133 static ssize_t ato_show(struct device_driver *ddp, char *buf)
7134 {
7135         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_ato);
7136 }
7137 static DRIVER_ATTR_RO(ato);
7138
/*
 * Show the provisioning map of the first store as a bit-range list, or the
 * whole LBA range when logical block provisioning is off.
 */
static ssize_t map_show(struct device_driver *ddp, char *buf)
{
	ssize_t count = 0;

	/* without logical block provisioning every sector is "mapped" */
	if (!scsi_debug_lbp())
		return scnprintf(buf, PAGE_SIZE, "0-%u\n",
				 sdebug_store_sectors);

	if (sdebug_fake_rw == 0 && !xa_empty(per_store_ap)) {
		struct sdeb_store_info *sip = xa_load(per_store_ap, 0);

		/* %*pbl renders the bitmap as a comma-separated range list;
		 * PAGE_SIZE - 1 leaves room for the '\n' appended below */
		if (sip)
			count = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
					  (int)map_size, sip->map_storep);
	}
	buf[count++] = '\n';
	buf[count] = '\0';

	return count;
}
static DRIVER_ATTR_RO(map);
7160
7161 static ssize_t random_show(struct device_driver *ddp, char *buf)
7162 {
7163         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_random);
7164 }
7165
7166 static ssize_t random_store(struct device_driver *ddp, const char *buf,
7167                             size_t count)
7168 {
7169         bool v;
7170
7171         if (kstrtobool(buf, &v))
7172                 return -EINVAL;
7173
7174         sdebug_random = v;
7175         return count;
7176 }
7177 static DRIVER_ATTR_RW(random);
7178
7179 static ssize_t removable_show(struct device_driver *ddp, char *buf)
7180 {
7181         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_removable ? 1 : 0);
7182 }
7183 static ssize_t removable_store(struct device_driver *ddp, const char *buf,
7184                                size_t count)
7185 {
7186         int n;
7187
7188         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7189                 sdebug_removable = (n > 0);
7190                 return count;
7191         }
7192         return -EINVAL;
7193 }
7194 static DRIVER_ATTR_RW(removable);
7195
7196 static ssize_t host_lock_show(struct device_driver *ddp, char *buf)
7197 {
7198         return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_host_lock);
7199 }
7200 /* N.B. sdebug_host_lock does nothing, kept for backward compatibility */
7201 static ssize_t host_lock_store(struct device_driver *ddp, const char *buf,
7202                                size_t count)
7203 {
7204         int n;
7205
7206         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7207                 sdebug_host_lock = (n > 0);
7208                 return count;
7209         }
7210         return -EINVAL;
7211 }
7212 static DRIVER_ATTR_RW(host_lock);
7213
7214 static ssize_t strict_show(struct device_driver *ddp, char *buf)
7215 {
7216         return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_strict);
7217 }
7218 static ssize_t strict_store(struct device_driver *ddp, const char *buf,
7219                             size_t count)
7220 {
7221         int n;
7222
7223         if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n >= 0)) {
7224                 sdebug_strict = (n > 0);
7225                 return count;
7226         }
7227         return -EINVAL;
7228 }
7229 static DRIVER_ATTR_RW(strict);
7230
7231 static ssize_t uuid_ctl_show(struct device_driver *ddp, char *buf)
7232 {
7233         return scnprintf(buf, PAGE_SIZE, "%d\n", !!sdebug_uuid_ctl);
7234 }
7235 static DRIVER_ATTR_RO(uuid_ctl);
7236
7237 static ssize_t cdb_len_show(struct device_driver *ddp, char *buf)
7238 {
7239         return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_cdb_len);
7240 }
7241 static ssize_t cdb_len_store(struct device_driver *ddp, const char *buf,
7242                              size_t count)
7243 {
7244         int ret, n;
7245
7246         ret = kstrtoint(buf, 0, &n);
7247         if (ret)
7248                 return ret;
7249         sdebug_cdb_len = n;
7250         all_config_cdb_len();
7251         return count;
7252 }
7253 static DRIVER_ATTR_RW(cdb_len);
7254
/* Long-form zoned model names; also used by zbc_show() */
static const char * const zbc_model_strs_a[] = {
	[BLK_ZONED_NONE] = "none",
	[BLK_ZONED_HA]   = "host-aware",
	[BLK_ZONED_HM]   = "host-managed",
};
7260
/* Short-form zoned model names accepted by sdeb_zbc_model_str() */
static const char * const zbc_model_strs_b[] = {
	[BLK_ZONED_NONE] = "no",
	[BLK_ZONED_HA]   = "aware",
	[BLK_ZONED_HM]   = "managed",
};
7266
/* Numeric zoned model spellings accepted by sdeb_zbc_model_str() */
static const char * const zbc_model_strs_c[] = {
	[BLK_ZONED_NONE] = "0",
	[BLK_ZONED_HA]   = "1",
	[BLK_ZONED_HM]   = "2",
};
7272
7273 static int sdeb_zbc_model_str(const char *cp)
7274 {
7275         int res = sysfs_match_string(zbc_model_strs_a, cp);
7276
7277         if (res < 0) {
7278                 res = sysfs_match_string(zbc_model_strs_b, cp);
7279                 if (res < 0) {
7280                         res = sysfs_match_string(zbc_model_strs_c, cp);
7281                         if (res < 0)
7282                                 return -EINVAL;
7283                 }
7284         }
7285         return res;
7286 }
7287
7288 static ssize_t zbc_show(struct device_driver *ddp, char *buf)
7289 {
7290         return scnprintf(buf, PAGE_SIZE, "%s\n",
7291                          zbc_model_strs_a[sdeb_zbc_model]);
7292 }
7293 static DRIVER_ATTR_RO(zbc);
7294
7295 static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
7296 {
7297         return scnprintf(buf, PAGE_SIZE, "%d\n", sdeb_tur_ms_to_ready);
7298 }
7299 static DRIVER_ATTR_RO(tur_ms_to_ready);
7300
7301 /* Note: The following array creates attribute files in the
7302    /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
7303    files (over those found in the /sys/module/scsi_debug/parameters
7304    directory) is that auxiliary actions can be triggered when an attribute
7305    is changed. For example see: add_host_store() above.
7306  */
7307
/* NULL-terminated table consumed by ATTRIBUTE_GROUPS(sdebug_drv) below */
static struct attribute *sdebug_drv_attrs[] = {
	&driver_attr_delay.attr,
	&driver_attr_opts.attr,
	&driver_attr_ptype.attr,
	&driver_attr_dsense.attr,
	&driver_attr_fake_rw.attr,
	&driver_attr_host_max_queue.attr,
	&driver_attr_no_lun_0.attr,
	&driver_attr_num_tgts.attr,
	&driver_attr_dev_size_mb.attr,
	&driver_attr_num_parts.attr,
	&driver_attr_every_nth.attr,
	&driver_attr_lun_format.attr,
	&driver_attr_max_luns.attr,
	&driver_attr_max_queue.attr,
	&driver_attr_no_rwlock.attr,
	&driver_attr_no_uld.attr,
	&driver_attr_scsi_level.attr,
	&driver_attr_virtual_gb.attr,
	&driver_attr_add_host.attr,
	&driver_attr_per_host_store.attr,
	&driver_attr_vpd_use_hostno.attr,
	&driver_attr_sector_size.attr,
	&driver_attr_statistics.attr,
	&driver_attr_submit_queues.attr,
	&driver_attr_dix.attr,
	&driver_attr_dif.attr,
	&driver_attr_guard.attr,
	&driver_attr_ato.attr,
	&driver_attr_map.attr,
	&driver_attr_random.attr,
	&driver_attr_removable.attr,
	&driver_attr_host_lock.attr,
	&driver_attr_ndelay.attr,
	&driver_attr_strict.attr,
	&driver_attr_uuid_ctl.attr,
	&driver_attr_cdb_len.attr,
	&driver_attr_tur_ms_to_ready.attr,
	&driver_attr_zbc.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
7350
7351 static struct device *pseudo_primary;
7352
/*
 * Module / built-in entry point.  Validates the module parameters, sizes
 * the simulated medium and fake geometry, optionally creates the first
 * backing store, registers the pseudo driver-model objects (root device,
 * bus, driver) and finally instantiates the requested emulated host(s).
 * Returns 0 on success else a negated errno.
 */
static int __init scsi_debug_init(void)
{
	bool want_store = (sdebug_fake_rw == 0);	/* fake_rw=1 => no ramdisk */
	unsigned long sz;
	int k, ret, hosts_to_add;
	int idx = -1;	/* per_store_ap index of the first store, if created */

	/* ndelay (nanoseconds) overrides the jiffies-based delay when set */
	if (sdebug_ndelay >= 1000 * 1000 * 1000) {
		pr_warn("ndelay must be less than 1 second, ignored\n");
		sdebug_ndelay = 0;
	} else if (sdebug_ndelay > 0)
		sdebug_jdelay = JDELAY_OVERRIDDEN;

	/* only power-of-two sector sizes from 512 to 4096 are emulated */
	switch (sdebug_sector_size) {
	case  512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		pr_err("invalid sector_size %d\n", sdebug_sector_size);
		return -EINVAL;
	}

	/* T10 PI (DIF) types 1..3 enable protection information handling */
	switch (sdebug_dif) {
	case T10_PI_TYPE0_PROTECTION:
		break;
	case T10_PI_TYPE1_PROTECTION:
	case T10_PI_TYPE2_PROTECTION:
	case T10_PI_TYPE3_PROTECTION:
		have_dif_prot = true;
		break;

	default:
		pr_err("dif must be 0, 1, 2 or 3\n");
		return -EINVAL;
	}

	if (sdebug_num_tgts < 0) {
		pr_err("num_tgts must be >= 0\n");
		return -EINVAL;
	}

	if (sdebug_guard > 1) {
		pr_err("guard must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_ato > 1) {
		pr_err("ato must be 0 or 1\n");
		return -EINVAL;
	}

	if (sdebug_physblk_exp > 15) {
		pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
		return -EINVAL;
	}

	/* LUN address method: fall back to peripheral style if out of range */
	sdebug_lun_am = sdebug_lun_am_i;
	if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
		pr_warn("Invalid LUN format %u, using default\n", (int)sdebug_lun_am);
		sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
	}

	/* more than 256 LUNs forces the flat addressing method */
	if (sdebug_max_luns > 256) {
		if (sdebug_max_luns > 16384) {
			pr_warn("max_luns can be no more than 16384, use default\n");
			sdebug_max_luns = DEF_MAX_LUNS;
		}
		sdebug_lun_am = SAM_LUN_AM_FLAT;
	}

	if (sdebug_lowest_aligned > 0x3fff) {
		pr_err("lowest_aligned too big: %u\n", sdebug_lowest_aligned);
		return -EINVAL;
	}

	if (submit_queues < 1) {
		pr_err("submit_queues must be 1 or more\n");
		return -EINVAL;
	}

	if ((sdebug_max_queue > SDEBUG_CANQUEUE) || (sdebug_max_queue < 1)) {
		pr_err("max_queue must be in range [1, %d]\n", SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	if ((sdebug_host_max_queue > SDEBUG_CANQUEUE) ||
	    (sdebug_host_max_queue < 0)) {
		pr_err("host_max_queue must be in range [0 %d]\n",
		       SDEBUG_CANQUEUE);
		return -EINVAL;
	}

	/* a non-zero host_max_queue overrides max_queue */
	if (sdebug_host_max_queue &&
	    (sdebug_max_queue != sdebug_host_max_queue)) {
		sdebug_max_queue = sdebug_host_max_queue;
		pr_warn("fixing max submit queue depth to host max queue depth, %d\n",
			sdebug_max_queue);
	}

	/*
	 * check for host managed zoned block device specified with
	 * ptype=0x14 or zbc=XXX.
	 */
	if (sdebug_ptype == TYPE_ZBC) {
		sdeb_zbc_model = BLK_ZONED_HM;
	} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
		k = sdeb_zbc_model_str(sdeb_zbc_model_s);
		if (k < 0)
			return k;
		sdeb_zbc_model = k;
		switch (sdeb_zbc_model) {
		case BLK_ZONED_NONE:
		case BLK_ZONED_HA:
			sdebug_ptype = TYPE_DISK;
			break;
		case BLK_ZONED_HM:
			sdebug_ptype = TYPE_ZBC;
			break;
		default:
			pr_err("Invalid ZBC model\n");
			return -EINVAL;
		}
	}
	if (sdeb_zbc_model != BLK_ZONED_NONE) {
		sdeb_zbc_in_use = true;
		if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
			sdebug_dev_size_mb = DEF_ZBC_DEV_SIZE_MB;
	}

	if (sdebug_dev_size_mb == DEF_DEV_SIZE_PRE_INIT)
		sdebug_dev_size_mb = DEF_DEV_SIZE_MB;
	if (sdebug_dev_size_mb < 1)
		sdebug_dev_size_mb = 1;  /* force minimum 1 MB ramdisk */
	sz = (unsigned long)sdebug_dev_size_mb * 1048576;
	sdebug_store_sectors = sz / sdebug_sector_size;
	sdebug_capacity = get_sdebug_capacity();

	/* play around with geometry, don't waste too much on track 0 */
	sdebug_heads = 8;
	sdebug_sectors_per = 32;
	if (sdebug_dev_size_mb >= 256)
		sdebug_heads = 64;
	else if (sdebug_dev_size_mb >= 16)
		sdebug_heads = 32;
	sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	if (sdebug_cylinders_per >= 1024) {
		/* other LLDs do this; implies >= 1GB ram disk ... */
		sdebug_heads = 255;
		sdebug_sectors_per = 63;
		sdebug_cylinders_per = (unsigned long)sdebug_capacity /
			       (sdebug_sectors_per * sdebug_heads);
	}
	if (scsi_debug_lbp()) {
		/* clamp logical block provisioning parameters to sane ranges */
		sdebug_unmap_max_blocks =
			clamp(sdebug_unmap_max_blocks, 0U, 0xffffffffU);

		sdebug_unmap_max_desc =
			clamp(sdebug_unmap_max_desc, 0U, 256U);

		sdebug_unmap_granularity =
			clamp(sdebug_unmap_granularity, 1U, 0xffffffffU);

		if (sdebug_unmap_alignment &&
		    sdebug_unmap_granularity <=
		    sdebug_unmap_alignment) {
			pr_err("ERR: unmap_granularity <= unmap_alignment\n");
			return -EINVAL;
		}
	}
	xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	if (want_store) {
		idx = sdebug_add_store();
		if (idx < 0)
			return idx;
	}

	pseudo_primary = root_device_register("pseudo_0");
	if (IS_ERR(pseudo_primary)) {
		pr_warn("root_device_register() error\n");
		ret = PTR_ERR(pseudo_primary);
		goto free_vm;
	}
	ret = bus_register(&pseudo_lld_bus);
	if (ret < 0) {
		pr_warn("bus_register error: %d\n", ret);
		goto dev_unreg;
	}
	ret = driver_register(&sdebug_driverfs_driver);
	if (ret < 0) {
		pr_warn("driver_register error: %d\n", ret);
		goto bus_unreg;
	}

	hosts_to_add = sdebug_add_host;
	sdebug_add_host = 0;

	queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
	if (!queued_cmd_cache) {
		ret = -ENOMEM;
		goto driver_unreg;
	}

	/* debugfs failure is non-fatal; the driver works without it */
	sdebug_debugfs_root = debugfs_create_dir("scsi_debug", NULL);
	if (IS_ERR_OR_NULL(sdebug_debugfs_root))
		pr_info("%s: failed to create initial debugfs directory\n", __func__);

	/*
	 * The first host reuses the store created above (when want_store);
	 * subsequent hosts get their own store only with per_host_store=1.
	 * NOTE(review): a failure inside this loop only breaks out and the
	 * function still returns 0 with fewer hosts than requested —
	 * presumably deliberate partial-success behavior; confirm.
	 */
	for (k = 0; k < hosts_to_add; k++) {
		if (want_store && k == 0) {
			ret = sdebug_add_host_helper(idx);
			if (ret < 0) {
				pr_err("add_host_helper k=%d, error=%d\n",
				       k, -ret);
				break;
			}
		} else {
			ret = sdebug_do_add_host(want_store &&
						 sdebug_per_host_store);
			if (ret < 0) {
				pr_err("add_host k=%d error=%d\n", k, -ret);
				break;
			}
		}
	}
	if (sdebug_verbose)
		pr_info("built %d host(s)\n", sdebug_num_hosts);

	return 0;

driver_unreg:
	driver_unregister(&sdebug_driverfs_driver);
bus_unreg:
	bus_unregister(&pseudo_lld_bus);
dev_unreg:
	root_device_unregister(pseudo_primary);
free_vm:
	sdebug_erase_store(idx, NULL);	/* no-op when idx is still -1 */
	return ret;
}
7594
7595 static void __exit scsi_debug_exit(void)
7596 {
7597         int k = sdebug_num_hosts;
7598
7599         for (; k; k--)
7600                 sdebug_do_remove_host(true);
7601         kmem_cache_destroy(queued_cmd_cache);
7602         driver_unregister(&sdebug_driverfs_driver);
7603         bus_unregister(&pseudo_lld_bus);
7604         root_device_unregister(pseudo_primary);
7605
7606         sdebug_erase_all_stores(false);
7607         xa_destroy(per_store_ap);
7608         debugfs_remove(sdebug_debugfs_root);
7609 }
7610
/* Built-in initcall (runs at device init time when compiled in) and the
 * matching module exit hook. */
device_initcall(scsi_debug_init);
module_exit(scsi_debug_exit);
7613
/*
 * Driver-core release callback for an emulated adapter: frees the
 * enclosing sdebug_host_info once its last device reference is dropped.
 */
static void sdebug_release_adapter(struct device *dev)
{
	kfree(dev_to_sdebug_host(dev));
}
7621
7622 /* idx must be valid, if sip is NULL then it will be obtained using idx */
7623 static void sdebug_erase_store(int idx, struct sdeb_store_info *sip)
7624 {
7625         if (idx < 0)
7626                 return;
7627         if (!sip) {
7628                 if (xa_empty(per_store_ap))
7629                         return;
7630                 sip = xa_load(per_store_ap, idx);
7631                 if (!sip)
7632                         return;
7633         }
7634         vfree(sip->map_storep);
7635         vfree(sip->dif_storep);
7636         vfree(sip->storep);
7637         xa_erase(per_store_ap, idx);
7638         kfree(sip);
7639 }
7640
/* Assume apart_from_first==false only in shutdown case. */
static void sdebug_erase_all_stores(bool apart_from_first)
{
	unsigned long idx;
	struct sdeb_store_info *sip = NULL;

	xa_for_each(per_store_ap, idx, sip) {
		/* skip (i.e. keep) only the first store visited */
		if (apart_from_first)
			apart_from_first = false;
		else
			sdebug_erase_store(idx, sip);
	}
	/*
	 * NOTE(review): apart_from_first can only still be true here when
	 * the xarray was empty (the loop clears it on the first entry), so
	 * this reset appears unreachable whenever any store exists —
	 * confirm whether it was meant to run whenever the first store is
	 * retained.
	 */
	if (apart_from_first)
		sdeb_most_recent_idx = sdeb_first_idx;
}
7656
7657 /*
7658  * Returns store xarray new element index (idx) if >=0 else negated errno.
7659  * Limit the number of stores to 65536.
7660  */
7661 static int sdebug_add_store(void)
7662 {
7663         int res;
7664         u32 n_idx;
7665         unsigned long iflags;
7666         unsigned long sz = (unsigned long)sdebug_dev_size_mb * 1048576;
7667         struct sdeb_store_info *sip = NULL;
7668         struct xa_limit xal = { .max = 1 << 16, .min = 0 };
7669
7670         sip = kzalloc(sizeof(*sip), GFP_KERNEL);
7671         if (!sip)
7672                 return -ENOMEM;
7673
7674         xa_lock_irqsave(per_store_ap, iflags);
7675         res = __xa_alloc(per_store_ap, &n_idx, sip, xal, GFP_ATOMIC);
7676         if (unlikely(res < 0)) {
7677                 xa_unlock_irqrestore(per_store_ap, iflags);
7678                 kfree(sip);
7679                 pr_warn("%s: xa_alloc() errno=%d\n", __func__, -res);
7680                 return res;
7681         }
7682         sdeb_most_recent_idx = n_idx;
7683         if (sdeb_first_idx < 0)
7684                 sdeb_first_idx = n_idx;
7685         xa_unlock_irqrestore(per_store_ap, iflags);
7686
7687         res = -ENOMEM;
7688         sip->storep = vzalloc(sz);
7689         if (!sip->storep) {
7690                 pr_err("user data oom\n");
7691                 goto err;
7692         }
7693         if (sdebug_num_parts > 0)
7694                 sdebug_build_parts(sip->storep, sz);
7695
7696         /* DIF/DIX: what T10 calls Protection Information (PI) */
7697         if (sdebug_dix) {
7698                 int dif_size;
7699
7700                 dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
7701                 sip->dif_storep = vmalloc(dif_size);
7702
7703                 pr_info("dif_storep %u bytes @ %pK\n", dif_size,
7704                         sip->dif_storep);
7705
7706                 if (!sip->dif_storep) {
7707                         pr_err("DIX oom\n");
7708                         goto err;
7709                 }
7710                 memset(sip->dif_storep, 0xff, dif_size);
7711         }
7712         /* Logical Block Provisioning */
7713         if (scsi_debug_lbp()) {
7714                 map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
7715                 sip->map_storep = vmalloc(array_size(sizeof(long),
7716                                                      BITS_TO_LONGS(map_size)));
7717
7718                 pr_info("%lu provisioning blocks\n", map_size);
7719
7720                 if (!sip->map_storep) {
7721                         pr_err("LBP map oom\n");
7722                         goto err;
7723                 }
7724
7725                 bitmap_zero(sip->map_storep, map_size);
7726
7727                 /* Map first 1KB for partition table */
7728                 if (sdebug_num_parts)
7729                         map_region(sip, 0, 2);
7730         }
7731
7732         rwlock_init(&sip->macc_lck);
7733         return (int)n_idx;
7734 err:
7735         sdebug_erase_store((int)n_idx, sip);
7736         pr_warn("%s: failed, errno=%d\n", __func__, -res);
7737         return res;
7738 }
7739
/*
 * Create one emulated host (adapter) with num_tgts * max_luns devices and
 * register it with the driver core.  @per_host_idx selects the backing
 * store: a valid index attaches that store, a negative value attaches the
 * first store ever created.  Returns 0 on success else a negated errno.
 */
static int sdebug_add_host_helper(int per_host_idx)
{
	int k, devs_per_host, idx;
	int error = -ENOMEM;
	struct sdebug_host_info *sdbg_host;
	struct sdebug_dev_info *sdbg_devinfo, *tmp;

	sdbg_host = kzalloc(sizeof(*sdbg_host), GFP_KERNEL);
	if (!sdbg_host)
		return -ENOMEM;
	idx = (per_host_idx < 0) ? sdeb_first_idx : per_host_idx;
	/* reclaim the chosen store if a previous host had released it */
	if (xa_get_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE))
		xa_clear_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
	sdbg_host->si_idx = idx;

	INIT_LIST_HEAD(&sdbg_host->dev_info_list);

	devs_per_host = sdebug_num_tgts * sdebug_max_luns;
	for (k = 0; k < devs_per_host; k++) {
		sdbg_devinfo = sdebug_device_create(sdbg_host, GFP_KERNEL);
		if (!sdbg_devinfo)
			goto clean;
	}

	mutex_lock(&sdebug_host_list_mutex);
	list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
	mutex_unlock(&sdebug_host_list_mutex);

	sdbg_host->dev.bus = &pseudo_lld_bus;
	sdbg_host->dev.parent = pseudo_primary;
	sdbg_host->dev.release = &sdebug_release_adapter;
	dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);

	error = device_register(&sdbg_host->dev);
	if (error) {
		/* undo the list insertion before dropping the reference */
		mutex_lock(&sdebug_host_list_mutex);
		list_del(&sdbg_host->host_list);
		mutex_unlock(&sdebug_host_list_mutex);
		goto clean;
	}

	++sdebug_num_hosts;
	return 0;

clean:
	/* free any devices created before the failure */
	list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
				 dev_list) {
		list_del(&sdbg_devinfo->dev_list);
		kfree(sdbg_devinfo->zstate);
		kfree(sdbg_devinfo);
	}
	/*
	 * Once dev.release has been set (device_register() was attempted),
	 * the device refcount owns sdbg_host and put_device() must free it;
	 * before that, a plain kfree() is correct.
	 */
	if (sdbg_host->dev.release)
		put_device(&sdbg_host->dev);
	else
		kfree(sdbg_host);
	pr_warn("%s: failed, errno=%d\n", __func__, -error);
	return error;
}
7798
7799 static int sdebug_do_add_host(bool mk_new_store)
7800 {
7801         int ph_idx = sdeb_most_recent_idx;
7802
7803         if (mk_new_store) {
7804                 ph_idx = sdebug_add_store();
7805                 if (ph_idx < 0)
7806                         return ph_idx;
7807         }
7808         return sdebug_add_host_helper(ph_idx);
7809 }
7810
/*
 * Remove the most recently added emulated host.  If its backing store is
 * not shared with any remaining host, the store is flagged
 * SDEB_XA_NOT_IN_USE so a later host add can reuse it.  @the_end is true
 * only on module exit, when stores are torn down wholesale elsewhere.
 */
static void sdebug_do_remove_host(bool the_end)
{
	int idx = -1;
	struct sdebug_host_info *sdbg_host = NULL;
	struct sdebug_host_info *sdbg_host2;

	mutex_lock(&sdebug_host_list_mutex);
	if (!list_empty(&sdebug_host_list)) {
		/* LIFO: remove the tail (newest) entry of the host list */
		sdbg_host = list_entry(sdebug_host_list.prev,
				       struct sdebug_host_info, host_list);
		idx = sdbg_host->si_idx;
	}
	if (!the_end && idx >= 0) {
		bool unique = true;

		/* is any other host still using this store? */
		list_for_each_entry(sdbg_host2, &sdebug_host_list, host_list) {
			if (sdbg_host2 == sdbg_host)
				continue;
			if (idx == sdbg_host2->si_idx) {
				unique = false;
				break;
			}
		}
		if (unique) {
			xa_set_mark(per_store_ap, idx, SDEB_XA_NOT_IN_USE);
			if (idx == sdeb_most_recent_idx)
				--sdeb_most_recent_idx;
		}
	}
	if (sdbg_host)
		list_del(&sdbg_host->host_list);
	mutex_unlock(&sdebug_host_list_mutex);

	if (!sdbg_host)
		return;

	/* triggers sdebug_release_adapter() once the last ref drops */
	device_unregister(&sdbg_host->dev);
	--sdebug_num_hosts;
}
7850
7851 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
7852 {
7853         struct sdebug_dev_info *devip = sdev->hostdata;
7854
7855         if (!devip)
7856                 return  -ENODEV;
7857
7858         mutex_lock(&sdebug_host_list_mutex);
7859         block_unblock_all_queues(true);
7860
7861         if (qdepth > SDEBUG_CANQUEUE) {
7862                 qdepth = SDEBUG_CANQUEUE;
7863                 pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
7864                         qdepth, SDEBUG_CANQUEUE);
7865         }
7866         if (qdepth < 1)
7867                 qdepth = 1;
7868         if (qdepth != sdev->queue_depth)
7869                 scsi_change_queue_depth(sdev, qdepth);
7870
7871         block_unblock_all_queues(false);
7872         mutex_unlock(&sdebug_host_list_mutex);
7873
7874         if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
7875                 sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
7876
7877         return sdev->queue_depth;
7878 }
7879
7880 static bool fake_timeout(struct scsi_cmnd *scp)
7881 {
7882         if (0 == (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth))) {
7883                 if (sdebug_every_nth < -1)
7884                         sdebug_every_nth = -1;
7885                 if (SDEBUG_OPT_TIMEOUT & sdebug_opts)
7886                         return true; /* ignore command causing timeout */
7887                 else if (SDEBUG_OPT_MAC_TIMEOUT & sdebug_opts &&
7888                          scsi_medium_access_command(scp))
7889                         return true; /* time out reads and writes */
7890         }
7891         return false;
7892 }
7893
/* Response to TUR or media access command when device stopped */
static int resp_not_ready(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
	int stopped_state;
	u64 diff_ns = 0;	/* ns elapsed since device creation */
	ktime_t now_ts = ktime_get_boottime();
	struct scsi_device *sdp = scp->device;

	stopped_state = atomic_read(&devip->stopped);
	if (stopped_state == 2) {	/* "in process of becoming ready" */
		if (ktime_to_ns(now_ts) > ktime_to_ns(devip->create_ts)) {
			diff_ns = ktime_to_ns(ktime_sub(now_ts, devip->create_ts));
			if (diff_ns >= ((u64)sdeb_tur_ms_to_ready * 1000000)) {
				/* tur_ms_to_ready timer extinguished */
				atomic_set(&devip->stopped, 0);
				return 0;
			}
		}
		mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x1);
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp,
				    "%s: Not ready: in process of becoming ready\n", my_name);
		if (scp->cmnd[0] == TEST_UNIT_READY) {
			u64 tur_nanosecs_to_ready = (u64)sdeb_tur_ms_to_ready * 1000000;

			/* remaining time until ready, clamped to the full window */
			if (diff_ns <= tur_nanosecs_to_ready)
				diff_ns = tur_nanosecs_to_ready - diff_ns;
			else
				diff_ns = tur_nanosecs_to_ready;
			/* As per 20-061r2 approved for spc6 by T10 on 20200716 */
			do_div(diff_ns, 1000000);	/* diff_ns becomes milliseconds */
			/* report remaining ms in the sense INFORMATION field */
			scsi_set_sense_information(scp->sense_buffer, SCSI_SENSE_BUFFERSIZE,
						   diff_ns);
			return check_condition_result;
		}
	}
	/* presumably the stopped==1 case: an initializing command is needed */
	mk_sense_buffer(scp, NOT_READY, LOGICAL_UNIT_NOT_READY, 0x2);
	if (sdebug_verbose)
		sdev_printk(KERN_INFO, sdp, "%s: Not ready: initializing command required\n",
			    my_name);
	return check_condition_result;
}
7936
7937 static void sdebug_map_queues(struct Scsi_Host *shost)
7938 {
7939         int i, qoff;
7940
7941         if (shost->nr_hw_queues == 1)
7942                 return;
7943
7944         for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
7945                 struct blk_mq_queue_map *map = &shost->tag_set.map[i];
7946
7947                 map->nr_queues  = 0;
7948
7949                 if (i == HCTX_TYPE_DEFAULT)
7950                         map->nr_queues = submit_queues - poll_queues;
7951                 else if (i == HCTX_TYPE_POLL)
7952                         map->nr_queues = poll_queues;
7953
7954                 if (!map->nr_queues) {
7955                         BUG_ON(i == HCTX_TYPE_DEFAULT);
7956                         continue;
7957                 }
7958
7959                 map->queue_offset = qoff;
7960                 blk_mq_map_queues(map);
7961
7962                 qoff += map->nr_queues;
7963         }
7964 }
7965
/* Context passed to sdebug_blk_mq_poll_iter() via blk_mq_tagset_busy_iter(). */
struct sdebug_blk_mq_poll_data {
	unsigned int queue_num;	/* hw queue being polled */
	int *num_entries;	/* out: number of commands completed */
};
7970
7971 /*
7972  * We don't handle aborted commands here, but it does not seem possible to have
7973  * aborted polled commands from schedule_resp()
7974  */
7975 static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
7976 {
7977         struct sdebug_blk_mq_poll_data *data = opaque;
7978         struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
7979         struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
7980         struct sdebug_defer *sd_dp;
7981         u32 unique_tag = blk_mq_unique_tag(rq);
7982         u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
7983         struct sdebug_queued_cmd *sqcp;
7984         unsigned long flags;
7985         int queue_num = data->queue_num;
7986         ktime_t time;
7987
7988         /* We're only interested in one queue for this iteration */
7989         if (hwq != queue_num)
7990                 return true;
7991
7992         /* Subsequent checks would fail if this failed, but check anyway */
7993         if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
7994                 return true;
7995
7996         time = ktime_get_boottime();
7997
7998         spin_lock_irqsave(&sdsc->lock, flags);
7999         sqcp = TO_QUEUED_CMD(cmd);
8000         if (!sqcp) {
8001                 spin_unlock_irqrestore(&sdsc->lock, flags);
8002                 return true;
8003         }
8004
8005         sd_dp = &sqcp->sd_dp;
8006         if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
8007                 spin_unlock_irqrestore(&sdsc->lock, flags);
8008                 return true;
8009         }
8010
8011         if (time < sd_dp->cmpl_ts) {
8012                 spin_unlock_irqrestore(&sdsc->lock, flags);
8013                 return true;
8014         }
8015
8016         ASSIGN_QUEUED_CMD(cmd, NULL);
8017         spin_unlock_irqrestore(&sdsc->lock, flags);
8018
8019         if (sdebug_statistics) {
8020                 atomic_inc(&sdebug_completions);
8021                 if (raw_smp_processor_id() != sd_dp->issuing_cpu)
8022                         atomic_inc(&sdebug_miss_cpus);
8023         }
8024
8025         sdebug_free_queued_cmd(sqcp);
8026
8027         scsi_done(cmd); /* callback to mid level */
8028         (*data->num_entries)++;
8029         return true;
8030 }
8031
8032 static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
8033 {
8034         int num_entries = 0;
8035         struct sdebug_blk_mq_poll_data data = {
8036                 .queue_num = queue_num,
8037                 .num_entries = &num_entries,
8038         };
8039
8040         blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
8041                                 &data);
8042
8043         if (num_entries > 0)
8044                 atomic_add(num_entries, &sdeb_mq_poll_count);
8045         return num_entries;
8046 }
8047
8048 static int sdebug_timeout_cmd(struct scsi_cmnd *cmnd)
8049 {
8050         struct scsi_device *sdp = cmnd->device;
8051         struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
8052         struct sdebug_err_inject *err;
8053         unsigned char *cmd = cmnd->cmnd;
8054         int ret = 0;
8055
8056         if (devip == NULL)
8057                 return 0;
8058
8059         rcu_read_lock();
8060         list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
8061                 if (err->type == ERR_TMOUT_CMD &&
8062                     (err->cmd == cmd[0] || err->cmd == 0xff)) {
8063                         ret = !!err->cnt;
8064                         if (err->cnt < 0)
8065                                 err->cnt++;
8066
8067                         rcu_read_unlock();
8068                         return ret;
8069                 }
8070         }
8071         rcu_read_unlock();
8072
8073         return 0;
8074 }
8075
8076 static int sdebug_fail_queue_cmd(struct scsi_cmnd *cmnd)
8077 {
8078         struct scsi_device *sdp = cmnd->device;
8079         struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
8080         struct sdebug_err_inject *err;
8081         unsigned char *cmd = cmnd->cmnd;
8082         int ret = 0;
8083
8084         if (devip == NULL)
8085                 return 0;
8086
8087         rcu_read_lock();
8088         list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
8089                 if (err->type == ERR_FAIL_QUEUE_CMD &&
8090                     (err->cmd == cmd[0] || err->cmd == 0xff)) {
8091                         ret = err->cnt ? err->queuecmd_ret : 0;
8092                         if (err->cnt < 0)
8093                                 err->cnt++;
8094
8095                         rcu_read_unlock();
8096                         return ret;
8097                 }
8098         }
8099         rcu_read_unlock();
8100
8101         return 0;
8102 }
8103
/*
 * ERR_FAIL_CMD injection: if an armed rule matches this command's opcode
 * (0xff matches any), build the configured sense data and status, copy
 * the rule into *info, schedule the response and store its queuecommand
 * status in *retval.  Returns non-zero when the injection was applied.
 */
static int sdebug_fail_cmd(struct scsi_cmnd *cmnd, int *retval,
			   struct sdebug_err_inject *info)
{
	struct scsi_device *sdp = cmnd->device;
	struct sdebug_dev_info *devip = (struct sdebug_dev_info *)sdp->hostdata;
	struct sdebug_err_inject *err;
	unsigned char *cmd = cmnd->cmnd;
	int ret = 0;
	int result;

	if (devip == NULL)
		return 0;

	rcu_read_lock();
	list_for_each_entry_rcu(err, &devip->inject_err_list, list) {
		if (err->type == ERR_FAIL_CMD &&
		    (err->cmd == cmd[0] || err->cmd == 0xff)) {
			if (!err->cnt) {
				rcu_read_unlock();
				return 0;	/* rule expired: no injection */
			}

			ret = !!err->cnt;
			rcu_read_unlock();
			goto out_handle;
		}
	}
	rcu_read_unlock();

	return 0;

out_handle:
	/*
	 * NOTE(review): err is dereferenced — and err->cnt modified — here,
	 * after rcu_read_unlock() above.  If entries can be removed from
	 * inject_err_list and freed concurrently, this is a potential
	 * use-after-free; confirm against the list-removal path.
	 */
	if (err->cnt < 0)
		err->cnt++;
	mk_sense_buffer(cmnd, err->sense_key, err->asc, err->asq);
	/* compose a SCSI result from the rule's status/host/driver bytes */
	result = err->status_byte | err->host_byte << 16 | err->driver_byte << 24;
	*info = *err;
	*retval = schedule_resp(cmnd, devip, result, NULL, 0, 0);

	return ret;
}
8145
/*
 * Main queuecommand entry point for the scsi_debug host template.
 *
 * Looks up the CDB opcode (and, where applicable, its service action) in
 * opcode_info_arr, runs the configured error-injection hooks, performs
 * unit-attention and not-ready checks, and finally hands the matched
 * resp_* handler to schedule_resp() with the configured delay.
 *
 * Returns 0 or a midlayer queueing code (e.g. SCSI_MLQUEUE_HOST_BUSY);
 * command status is delivered asynchronously via schedule_resp().
 */
static int scsi_debug_queuecommand(struct Scsi_Host *shost,
				   struct scsi_cmnd *scp)
{
	u8 sdeb_i;
	struct scsi_device *sdp = scp->device;
	const struct opcode_info_t *oip;
	const struct opcode_info_t *r_oip;
	struct sdebug_dev_info *devip;
	u8 *cmd = scp->cmnd;
	int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
	int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
	int k, na;
	int errsts = 0;
	u64 lun_index = sdp->lun & 0x3FFF;	/* low 14 bits of the LUN */
	u32 flags;
	u16 sa;
	u8 opcode = cmd[0];
	bool has_wlun_rl;
	bool inject_now;
	int ret = 0;
	struct sdebug_err_inject err;

	scsi_set_resid(scp, 0);
	/* Only count commands (and pick injection victims) when stats are on */
	if (sdebug_statistics) {
		atomic_inc(&sdebug_cmnd_count);
		inject_now = inject_on_this_cmd();
	} else {
		inject_now = false;
	}
	/* Optionally log the CDB as hex bytes (up to 32 of them) */
	if (unlikely(sdebug_verbose &&
		     !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
		char b[120];
		int n, len, sb;

		len = scp->cmd_len;
		sb = (int)sizeof(b);
		if (len > 32)
			strcpy(b, "too long, over 32 bytes");
		else {
			for (k = 0, n = 0; k < len && n < sb; ++k)
				n += scnprintf(b + n, sb - n, "%02x ",
					       (u32)cmd[k]);
		}
		sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
			    blk_mq_unique_tag(scsi_cmd_to_rq(scp)), b);
	}
	if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
		return SCSI_MLQUEUE_HOST_BUSY;
	has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
	/* Out-of-range LUNs (other than the REPORT LUNS well-known LUN) fail */
	if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
		goto err_out;

	sdeb_i = opcode_ind_arr[opcode];	/* fully mapped */
	oip = &opcode_info_arr[sdeb_i];		/* safe if table consistent */
	devip = (struct sdebug_dev_info *)sdp->hostdata;
	if (unlikely(!devip)) {
		devip = find_build_dev_info(sdp);
		if (NULL == devip)
			goto err_out;
	}

	/* Injection hook: silently drop the command to simulate a timeout */
	if (sdebug_timeout_cmd(scp)) {
		scmd_printk(KERN_INFO, scp, "timeout command 0x%x\n", opcode);
		return 0;
	}

	/* Injection hook: fail at the queueing stage with a midlayer code */
	ret = sdebug_fail_queue_cmd(scp);
	if (ret) {
		scmd_printk(KERN_INFO, scp, "fail queue command 0x%x with 0x%x\n",
				opcode, ret);
		return ret;
	}

	/* Injection hook: complete the command with a configured error */
	if (sdebug_fail_cmd(scp, &ret, &err)) {
		scmd_printk(KERN_INFO, scp,
			"fail command 0x%x with hostbyte=0x%x, "
			"driverbyte=0x%x, statusbyte=0x%x, "
			"sense_key=0x%x, asc=0x%x, asq=0x%x\n",
			opcode, err.host_byte, err.driver_byte,
			err.status_byte, err.sense_key, err.asc, err.asq);
		return ret;
	}

	if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
		atomic_set(&sdeb_inject_pending, 1);

	na = oip->num_attached;
	r_pfp = oip->pfp;
	if (na) {	/* multiple commands with this opcode */
		r_oip = oip;
		if (FF_SA & r_oip->flags) {
			/* service action: CDB byte 1 (low 5 bits) or bytes 8-9 */
			if (F_SA_LOW & oip->flags)
				sa = 0x1f & cmd[1];
			else
				sa = get_unaligned_be16(cmd + 8);
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode && sa == oip->sa)
					break;
			}
		} else {   /* since no service action only check opcode */
			for (k = 0; k <= na; oip = r_oip->arrp + k++) {
				if (opcode == oip->opcode)
					break;
			}
		}
		/* no variant matched: point sense data at the SA field or opcode */
		if (k > na) {
			if (F_SA_LOW & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 1, 4);
			else if (F_SA_HIGH & r_oip->flags)
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, 8, 7);
			else
				mk_sense_invalid_opcode(scp);
			goto check_cond;
		}
	}	/* else (when na==0) we assume the oip is a match */
	flags = oip->flags;
	if (unlikely(F_INV_OP & flags)) {
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(has_wlun_rl && !(F_RL_WLUN_OK & flags))) {
		if (sdebug_verbose)
			sdev_printk(KERN_INFO, sdp, "%s: Opcode 0x%x not%s\n",
				    my_name, opcode, " supported for wlun");
		mk_sense_invalid_opcode(scp);
		goto check_cond;
	}
	if (unlikely(sdebug_strict)) {	/* check cdb against mask */
		u8 rem;
		int j;

		for (k = 1; k < oip->len_mask[0] && k < 16; ++k) {
			rem = ~oip->len_mask[k] & cmd[k];
			if (rem) {
				/* find highest offending bit for the sense data */
				for (j = 7; j >= 0; --j, rem <<= 1) {
					if (0x80 & rem)
						break;
				}
				mk_sense_invalid_fld(scp, SDEB_IN_CDB, k, j);
				goto check_cond;
			}
		}
	}
	/* Report any pending unit attention unless the opcode skips UAs */
	if (unlikely(!(F_SKIP_UA & flags) &&
		     find_first_bit(devip->uas_bm,
				    SDEBUG_NUM_UAS) != SDEBUG_NUM_UAS)) {
		errsts = make_ua(scp, devip);
		if (errsts)
			goto check_cond;
	}
	/* Media-access commands (and TUR) fail while the unit is stopped */
	if (unlikely(((F_M_ACCESS & flags) || scp->cmnd[0] == TEST_UNIT_READY) &&
		     atomic_read(&devip->stopped))) {
		errsts = resp_not_ready(scp, devip);
		if (errsts)
			goto fini;
	}
	/* fake_rw: skip the resp_* handler and report immediate success */
	if (sdebug_fake_rw && (F_FAKE_RW & flags))
		goto fini;
	if (unlikely(sdebug_every_nth)) {
		if (fake_timeout(scp))
			return 0;	/* ignore command: make trouble */
	}
	if (likely(oip->pfp))
		pfp = oip->pfp;	/* calls a resp_* function */
	else
		pfp = r_pfp;	/* if leaf function ptr NULL, try the root's */

fini:
	if (F_DELAY_OVERR & flags)	/* cmds like INQUIRY respond asap */
		return schedule_resp(scp, devip, errsts, pfp, 0, 0);
	else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 ||
					    sdebug_ndelay > 10000)) {
		/*
		 * Skip long delays if ndelay <= 10 microseconds. Otherwise
		 * for Start Stop Unit (SSU) want at least 1 second delay and
		 * if sdebug_jdelay>1 want a long delay of that many seconds.
		 * For Synchronize Cache want 1/20 of SSU's delay.
		 */
		int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay;
		int denom = (flags & F_SYNC_DELAY) ? 20 : 1;

		jdelay = mult_frac(USER_HZ * jdelay, HZ, denom * USER_HZ);
		return schedule_resp(scp, devip, errsts, pfp, jdelay, 0);
	} else
		return schedule_resp(scp, devip, errsts, pfp, sdebug_jdelay,
				     sdebug_ndelay);
check_cond:
	return schedule_resp(scp, devip, check_condition_result, NULL, 0, 0);
err_out:
	return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}
8337
8338 static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
8339 {
8340         struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
8341
8342         spin_lock_init(&sdsc->lock);
8343
8344         return 0;
8345 }
8346
/*
 * Host template for every simulated adapter. can_queue, cmd_per_lun and
 * dma_boundary are overridden per module parameters in
 * sdebug_driver_probe() before scsi_host_alloc() is called.
 */
static struct scsi_host_template sdebug_driver_template = {
	.show_info =		scsi_debug_show_info,
	.write_info =		scsi_debug_write_info,
	.proc_name =		sdebug_proc_name,
	.name =			"SCSI DEBUG",
	.info =			scsi_debug_info,
	.slave_alloc =		scsi_debug_slave_alloc,
	.slave_configure =	scsi_debug_slave_configure,
	.slave_destroy =	scsi_debug_slave_destroy,
	.ioctl =		scsi_debug_ioctl,
	.queuecommand =		scsi_debug_queuecommand,
	.change_queue_depth =	sdebug_change_qdepth,
	.map_queues =		sdebug_map_queues,
	.mq_poll =		sdebug_blk_mq_poll,
	.eh_abort_handler =	scsi_debug_abort,
	.eh_device_reset_handler = scsi_debug_device_reset,
	.eh_target_reset_handler = scsi_debug_target_reset,
	.eh_bus_reset_handler = scsi_debug_bus_reset,
	.eh_host_reset_handler = scsi_debug_host_reset,
	.can_queue =		SDEBUG_CANQUEUE,
	.this_id =		7,
	.sg_tablesize =		SG_MAX_SEGMENTS,
	.cmd_per_lun =		DEF_CMD_PER_LUN,
	.max_sectors =		-1U,	/* no limit */
	.max_segment_size =	-1U,	/* no limit */
	.module =		THIS_MODULE,
	.track_queue_depth =	1,
	.cmd_size = sizeof(struct sdebug_scsi_cmd),
	.init_cmd_priv = sdebug_init_cmd_priv,
	.target_alloc =		sdebug_target_alloc,
	.target_destroy =	sdebug_target_destroy,
};
8379
/*
 * Bus probe callback for the pseudo bus: allocate, configure and register
 * one simulated SCSI host for @dev.
 *
 * NOTE(review): the shared sdebug_driver_template is mutated here
 * (can_queue, cmd_per_lun, dma_boundary) before scsi_host_alloc(), so the
 * module-parameter values in effect at probe time apply to every host
 * allocated from that point on.
 *
 * Returns 0 on success or -ENODEV on allocation/registration failure.
 */
static int sdebug_driver_probe(struct device *dev)
{
	int error = 0;
	struct sdebug_host_info *sdbg_host;
	struct Scsi_Host *hpnt;
	int hprot;

	sdbg_host = dev_to_sdebug_host(dev);

	/* Push module-parameter queue sizing into the shared template */
	sdebug_driver_template.can_queue = sdebug_max_queue;
	sdebug_driver_template.cmd_per_lun = sdebug_max_queue;
	if (!sdebug_clustering)
		sdebug_driver_template.dma_boundary = PAGE_SIZE - 1;

	hpnt = scsi_host_alloc(&sdebug_driver_template, 0);
	if (NULL == hpnt) {
		pr_err("scsi_host_alloc failed\n");
		error = -ENODEV;
		return error;
	}
	/* Never ask for more hardware queues than there are CPUs */
	if (submit_queues > nr_cpu_ids) {
		pr_warn("%s: trim submit_queues (was %d) to nr_cpu_ids=%u\n",
			my_name, submit_queues, nr_cpu_ids);
		submit_queues = nr_cpu_ids;
	}
	/*
	 * Decide whether to tell scsi subsystem that we want mq. The
	 * following should give the same answer for each host.
	 */
	hpnt->nr_hw_queues = submit_queues;
	if (sdebug_host_max_queue)
		hpnt->host_tagset = 1;

	/* poll queues are possible for nr_hw_queues > 1 */
	if (hpnt->nr_hw_queues == 1 || (poll_queues < 1)) {
		pr_warn("%s: trim poll_queues to 0. poll_q/nr_hw = (%d/%d)\n",
			 my_name, poll_queues, hpnt->nr_hw_queues);
		poll_queues = 0;
	}

	/*
	 * Poll queues don't need interrupts, but we need at least one I/O queue
	 * left over for non-polled I/O.
	 * If condition not met, trim poll_queues to 1 (just for simplicity).
	 */
	if (poll_queues >= submit_queues) {
		if (submit_queues < 3)
			pr_warn("%s: trim poll_queues to 1\n", my_name);
		else
			pr_warn("%s: trim poll_queues to 1. Perhaps try poll_queues=%d\n",
				my_name, submit_queues - 1);
		poll_queues = 1;
	}
	if (poll_queues)
		hpnt->nr_maps = 3;	/* default, read and poll queue maps */

	sdbg_host->shost = hpnt;
	/* Leave room above this_id so the host's own id is addressable */
	if ((hpnt->this_id >= 0) && (sdebug_num_tgts > hpnt->this_id))
		hpnt->max_id = sdebug_num_tgts + 1;
	else
		hpnt->max_id = sdebug_num_tgts;
	/* = sdebug_max_luns; */
	hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;

	hprot = 0;

	/* Map sdebug_dif/sdebug_dix module params to host protection flags */
	switch (sdebug_dif) {

	case T10_PI_TYPE1_PROTECTION:
		hprot = SHOST_DIF_TYPE1_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE1_PROTECTION;
		break;

	case T10_PI_TYPE2_PROTECTION:
		hprot = SHOST_DIF_TYPE2_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE2_PROTECTION;
		break;

	case T10_PI_TYPE3_PROTECTION:
		hprot = SHOST_DIF_TYPE3_PROTECTION;
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE3_PROTECTION;
		break;

	default:
		if (sdebug_dix)
			hprot |= SHOST_DIX_TYPE0_PROTECTION;
		break;
	}

	scsi_host_set_prot(hpnt, hprot);

	if (have_dif_prot || sdebug_dix)
		pr_info("host protection%s%s%s%s%s%s%s\n",
			(hprot & SHOST_DIF_TYPE1_PROTECTION) ? " DIF1" : "",
			(hprot & SHOST_DIF_TYPE2_PROTECTION) ? " DIF2" : "",
			(hprot & SHOST_DIF_TYPE3_PROTECTION) ? " DIF3" : "",
			(hprot & SHOST_DIX_TYPE0_PROTECTION) ? " DIX0" : "",
			(hprot & SHOST_DIX_TYPE1_PROTECTION) ? " DIX1" : "",
			(hprot & SHOST_DIX_TYPE2_PROTECTION) ? " DIX2" : "",
			(hprot & SHOST_DIX_TYPE3_PROTECTION) ? " DIX3" : "");

	if (sdebug_guard == 1)
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_IP);
	else
		scsi_host_set_guard(hpnt, SHOST_DIX_GUARD_CRC);

	sdebug_verbose = !!(SDEBUG_OPT_NOISE & sdebug_opts);
	sdebug_any_injecting_opt = !!(SDEBUG_OPT_ALL_INJECTING & sdebug_opts);
	if (sdebug_every_nth)	/* need stats counters for every_nth */
		sdebug_statistics = true;
	error = scsi_add_host(hpnt, &sdbg_host->dev);
	if (error) {
		pr_err("scsi_add_host failed\n");
		error = -ENODEV;
		scsi_host_put(hpnt);	/* drop the scsi_host_alloc() reference */
	} else {
		scsi_scan_host(hpnt);
	}

	return error;
}
8504
8505 static void sdebug_driver_remove(struct device *dev)
8506 {
8507         struct sdebug_host_info *sdbg_host;
8508         struct sdebug_dev_info *sdbg_devinfo, *tmp;
8509
8510         sdbg_host = dev_to_sdebug_host(dev);
8511
8512         scsi_remove_host(sdbg_host->shost);
8513
8514         list_for_each_entry_safe(sdbg_devinfo, tmp, &sdbg_host->dev_info_list,
8515                                  dev_list) {
8516                 list_del(&sdbg_devinfo->dev_list);
8517                 kfree(sdbg_devinfo->zstate);
8518                 kfree(sdbg_devinfo);
8519         }
8520
8521         scsi_host_put(sdbg_host->shost);
8522 }
8523
/*
 * Pseudo bus that the simulated adapters hang off: probe/remove create
 * and tear down one SCSI host per pseudo device.
 */
static struct bus_type pseudo_lld_bus = {
	.name = "pseudo",
	.probe = sdebug_driver_probe,
	.remove = sdebug_driver_remove,
	.drv_groups = sdebug_drv_groups,
};
/* (gitweb footer artifact, not part of the source file:
 * "This page took 0.558808 seconds and 4 git commands to generate.") */