/*
 * The low performance USB storage driver (ub).
 *
 * Copyright (c) 1999, 2000 Matthew Dharm ([email protected])
 * Copyright (C) 2004 Pete Zaitcev ([email protected])
 *
 * This work is a part of Linux kernel, is derived from it,
 * and is not licensed separately. See file COPYING for details.
 *
 * TODO (sorted by decreasing priority)
 *  -- Return sense now that rq allows it (we always auto-sense anyway).
 *  -- set readonly flag for CDs, set removable flag for CF readers
 *  -- do inquiry and verify we got a disk and not a tape (for LUN mismatch)
 *  -- verify the 13 conditions and do bulk resets
 *  -- highmem
 *  -- move top_sense and work_bcs into separate allocations (if they survive)
 *     for cache purists and esoteric architectures.
 *  -- Allocate structure for LUN 0 before the first ub_sync_tur, avoid NULL. ?
 *  -- prune comments, they are too voluminous
 *  -- Resolve XXX's
 *  -- CLEAR, CLR2STS, CLRRS seem to be ripe for refactoring.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb_usual.h>
#include <linux/blkdev.h>
#include <linux/timer.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>

#define DRV_NAME "ub"

#define UB_MAJOR 180

/*
 * The command state machine is the key model for understanding of this driver.
 *
 * The general rule is that all transitions are done towards the bottom
 * of the diagram, thus preventing any loops.
 *
 * An exception to that is how the STAT state is handled. A counter allows it
 * to be re-entered along the path marked with [C].
 *
 *       +--------+
 *       ! INIT   !
 *       +--------+
 *           !
 *        ub_scsi_cmd_start fails ->--------------------------------------\
 *           !                                                            !
 *           V                                                            !
 *       +--------+                                                       !
 *       ! CMD    !                                                       !
 *       +--------+                                                       !
 *           !                            +--------+                      !
 *           was -EPIPE -->-------------->! CLEAR  !                      !
 *           !                            +--------+                      !
 *           !                                !                           !
 *           was error -->------------------- ! --------->\               !
 *           !                                !           !               !
 *  /--<-- cmd->dir == NONE ?                 !           !               !
 *  !        !                                !           !               !
 *  !        V                                !           !               !
 *  !    +--------+                           !           !               !
 *  !    ! DATA   !                           !           !               !
 *  !    +--------+                           !           !               !
 *  !        !                           +---------+      !               !
 *  !        was -EPIPE -->------------->! CLR2STS !      !               !
 *  !        !                           +---------+      !               !
 *  !        !                                !           !               !
 *  !        !                          was error -->---- ! --------->\   !
 *  !        was error -->--------------------- ! ------------- ! --------->\
 *  !        !                                !           !   !           !
 *  !        V                                !           !   !           !
 *  \--->+--------+                           !           !   !           !
 *       ! STAT   !<--------------------------/           !   !           !
 *  /--->+--------+                                       !   !           !
 *  !        !                                            !   !           !
 * [C]       was -EPIPE -->-----------\                   !   !           !
 *  !        !                        !                   !   !           !
 *  +<---- len == 0                   !                   !   !           !
 *  !        !                        !                   !   !           !
 *  !        was error -->--------------------------------------!---------->\
 *  !        !                        !                   !   !           !
 *  +<---- bad CSW                    !                   !   !           !
 *  +<---- bad tag                    !                   !   !           !
 *  !        !                        V                   !   !           !
 *  !        !                   +--------+               !   !           !
 *  !        !                   ! CLRRS  !               !   !           !
 *  !        !                   +--------+               !   !           !
 *  !        !                        !                   !   !           !
 *  \------- ! --------------------[C]--------\           !   !           !
 *           !                                !           !   !           !
 *           cmd->error---\                +--------+     !   !           !
 *           !            +--------------->! SENSE !<----------/          !
 *           STAT_FAIL----/                +--------+     !               !
 *                                             !          !               V
 *                                             !          V           +--------+
 *                                             \--------------------->! DONE   !
 *                                                                    +--------+
 */

/*
 * This many LUNs per USB device.
 * Every one of them takes a host, see UB_MAX_HOSTS.
 */
#define UB_MAX_LUNS 9

/*
 */

#define UB_PARTS_PER_LUN 8

#define UB_MAX_CDB_SIZE 16	/* Corresponds to Bulk */

#define UB_SENSE_SIZE 18

/*
 */

/* command block wrapper */
struct bulk_cb_wrap {
	__le32	Signature;		/* contains 'USBC' */
	u32	Tag;			/* unique per command id */
	__le32	DataTransferLength;	/* size of data */
	u8	Flags;			/* direction in bit 0 */
	u8	Lun;			/* LUN */
	u8	Length;			/* of the CDB */
	u8	CDB[UB_MAX_CDB_SIZE];	/* max command */
};

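/*
 * Wire layout of the 31-byte CBW above, per the Bulk-Only Transport spec:
 * bytes 0-3 Signature, 4-7 Tag, 8-11 DataTransferLength, 12 Flags,
 * 13 LUN, 14 CDB length, 15-30 CDB. Multi-byte fields are little-endian;
 * Tag is treated as opaque and is merely echoed back in the CSW.
 */
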
#define US_BULK_CB_WRAP_LEN	31
#define US_BULK_CB_SIGN		0x43425355	/* spells out 'USBC' */
#define US_BULK_FLAG_IN		1
#define US_BULK_FLAG_OUT	0

/* command status wrapper */
struct bulk_cs_wrap {
	__le32	Signature;	/* should = 'USBS' */
	u32	Tag;		/* same as original command */
	__le32	Residue;	/* amount not transferred */
	u8	Status;		/* see below */
};

#define US_BULK_CS_WRAP_LEN	13
#define US_BULK_CS_SIGN		0x53425355	/* spells out 'USBS' */
#define US_BULK_STAT_OK		0
#define US_BULK_STAT_FAIL	1
#define US_BULK_STAT_PHASE	2

/* bulk-only class specific requests */
#define US_BULK_RESET_REQUEST	0xff
#define US_BULK_GET_MAX_LUN	0xfe

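/*
 * For illustration only: a minimal sketch of how US_BULK_GET_MAX_LUN is
 * used on the wire. Per the Bulk-Only spec it is a class-specific control
 * read of one byte from the interface; a STALL conventionally means the
 * device has LUN 0 only. The "example_" name is ours, not part of this
 * driver's probe path, and the timeout is assumed to be in milliseconds.
 */
#if 0
static int example_get_max_lun(struct usb_device *dev, int ifnum)
{
	unsigned char *p;
	int rc;

	p = kmalloc(1, GFP_KERNEL);	/* DMA-safe buffer, never the stack */
	if (p == NULL)
		return 0;
	rc = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
	    US_BULK_GET_MAX_LUN,
	    USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
	    0, ifnum, p, 1, 500 /* ms */);
	rc = (rc == 1) ? *p : 0;	/* stall or error: assume LUN 0 only */
	kfree(p);
	return rc;
}
#endif
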
/*
 */
struct ub_dev;

#define UB_MAX_REQ_SG	9	/* cdrecord requires 32KB and maybe a header */
#define UB_MAX_SECTORS 64

/*
 * A second is more than enough for a 32K transfer (UB_MAX_SECTORS)
 * even if a webcam hogs the bus, but some devices need time to spin up.
 */
#define UB_URB_TIMEOUT	(HZ*2)
#define UB_DATA_TIMEOUT	(HZ*5)	/* ZIP does spin-ups in the data phase */
#define UB_STAT_TIMEOUT	(HZ*5)	/* Same spinups and eject for a dataless cmd. */
#define UB_CTRL_TIMEOUT	(HZ/2)	/* 500ms ought to be enough to clear a stall */

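/*
 * Rough arithmetic behind the figure above: full-speed bulk moves at most
 * 19 packets of 64 bytes per 1 ms frame, about 1.2 MB/s, so a 32 KiB
 * transfer completes in roughly 30 ms on an otherwise idle bus, and HZ*2
 * still leaves better than a 60x margin for a busy one.
 */
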
/*
 * An instance of a SCSI command in transit.
 */
#define UB_DIR_NONE	0
#define UB_DIR_READ	1
#define UB_DIR_ILLEGAL2	2
#define UB_DIR_WRITE	3

#define UB_DIR_CHAR(c)  (((c)==UB_DIR_WRITE)? 'w': \
			 (((c)==UB_DIR_READ)? 'r': 'n'))

enum ub_scsi_cmd_state {
	UB_CMDST_INIT,			/* Initial state */
	UB_CMDST_CMD,			/* Command submitted */
	UB_CMDST_DATA,			/* Data phase */
	UB_CMDST_CLR2STS,		/* Clearing before requesting status */
	UB_CMDST_STAT,			/* Status phase */
	UB_CMDST_CLEAR,			/* Clearing a stall (halt, actually) */
	UB_CMDST_CLRRS,			/* Clearing before retrying status */
	UB_CMDST_SENSE,			/* Sending Request Sense */
	UB_CMDST_DONE			/* Final state */
};

struct ub_scsi_cmd {
	unsigned char cdb[UB_MAX_CDB_SIZE];
	unsigned char cdb_len;

	unsigned char dir;		/* 0 - none, 1 - read, 3 - write. */
	enum ub_scsi_cmd_state state;
	unsigned int tag;
	struct ub_scsi_cmd *next;

	int error;			/* Return code - valid upon done */
	unsigned int act_len;		/* Return size */
	unsigned char key, asc, ascq;	/* May be valid if error==-EIO */

	int stat_count;			/* Retries getting status. */
	unsigned int timeo;		/* jiffies until rq->timeout changes */

	unsigned int len;		/* Requested length */
	unsigned int current_sg;
	unsigned int nsg;		/* sgv[nsg] */
	struct scatterlist sgv[UB_MAX_REQ_SG];

	struct ub_lun *lun;
	void (*done)(struct ub_dev *, struct ub_scsi_cmd *);
	void *back;
};

struct ub_request {
	struct request *rq;
	unsigned int current_try;
	unsigned int nsg;		/* sgv[nsg] */
	struct scatterlist sgv[UB_MAX_REQ_SG];
};

/*
 */
struct ub_capacity {
	unsigned long nsec;		/* Linux size - 512 byte sectors */
	unsigned int bsize;		/* Linux hardsect_size */
	unsigned int bshift;		/* Shift between 512 and hard sects */
};

/*
 * This is a direct take-off from linux/include/completion.h
 * The difference is that I do not wait on this thing, just poll.
 * When I want to wait (ub_probe), I just use the stock completion.
 *
 * Note that INIT_COMPLETION takes no lock. It is correct. But why
 * in the bloody hell that thing takes struct instead of pointer to struct
 * is quite beyond me. I just copied it from the stock completion.
 */
struct ub_completion {
	unsigned int done;
	spinlock_t lock;
};

static inline void ub_init_completion(struct ub_completion *x)
{
	x->done = 0;
	spin_lock_init(&x->lock);
}

#define UB_INIT_COMPLETION(x)	((x).done = 0)

static void ub_complete(struct ub_completion *x)
{
	unsigned long flags;

	spin_lock_irqsave(&x->lock, flags);
	x->done++;
	spin_unlock_irqrestore(&x->lock, flags);
}

static int ub_is_completed(struct ub_completion *x)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&x->lock, flags);
	ret = x->done;
	spin_unlock_irqrestore(&x->lock, flags);
	return ret;
}

/*
 */
struct ub_scsi_cmd_queue {
	int qlen, qmax;
	struct ub_scsi_cmd *head, *tail;
};

/*
 * The block device instance (one per LUN).
 */
struct ub_lun {
	struct ub_dev *udev;
	struct list_head link;
	struct gendisk *disk;
	int id;				/* Host index */
	int num;			/* LUN number */
	char name[16];

	int changed;			/* Media was changed */
	int removable;
	int readonly;

	struct ub_request urq;

	/* Use Ingo's mempool if or when we have more than one command. */
	/*
	 * Currently we never need more than one command for the whole device.
	 * However, giving every LUN a command is a cheap and automatic way
	 * to enforce fairness between them.
	 */
	int cmda[1];
	struct ub_scsi_cmd cmdv[1];

	struct ub_capacity capacity;
};

/*
 * The USB device instance.
 */
struct ub_dev {
	spinlock_t *lock;
	atomic_t poison;		/* The USB device is disconnected */
	int openc;			/* protected by ub_lock! */
					/* kref is too implicit for our taste */
	int reset;			/* Reset is running */
	int bad_resid;
	unsigned int tagcnt;
	char name[12];
	struct usb_device *dev;
	struct usb_interface *intf;

	struct list_head luns;

	unsigned int send_bulk_pipe;	/* cached pipe values */
	unsigned int recv_bulk_pipe;
	unsigned int send_ctrl_pipe;
	unsigned int recv_ctrl_pipe;

	struct tasklet_struct tasklet;

	struct ub_scsi_cmd_queue cmd_queue;
	struct ub_scsi_cmd top_rqs_cmd;	/* REQUEST SENSE */
	unsigned char top_sense[UB_SENSE_SIZE];

	struct ub_completion work_done;
	struct urb work_urb;
	struct timer_list work_timer;
	int last_pipe;			/* What might need clearing */
	__le32 signature;		/* Learned signature */
	struct bulk_cb_wrap work_bcb;
	struct bulk_cs_wrap work_bcs;
	struct usb_ctrlrequest work_cr;

	struct work_struct reset_work;
	wait_queue_head_t reset_wait;
};

/*
 */
static void ub_cleanup(struct ub_dev *sc);
static int ub_request_fn_1(struct ub_lun *lun, struct request *rq);
static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq);
static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq);
static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_end_rq(struct request *rq, unsigned int status);
static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_request *urq, struct ub_scsi_cmd *cmd);
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_urb_complete(struct urb *urb);
static void ub_scsi_action(unsigned long _dev);
static void ub_scsi_dispatch(struct ub_dev *sc);
static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc);
static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
    int stalled_pipe);
static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
static void ub_reset_enter(struct ub_dev *sc, int try);
static void ub_reset_task(struct work_struct *work);
static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_capacity *ret);
static int ub_sync_reset(struct ub_dev *sc);
static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe);
static int ub_probe_lun(struct ub_dev *sc, int lnum);

/*
 */
#ifdef CONFIG_USB_LIBUSUAL

#define ub_usb_ids  usb_storage_usb_ids
#else

static struct usb_device_id ub_usb_ids[] = {
	{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) },
	{ }
};

MODULE_DEVICE_TABLE(usb, ub_usb_ids);
#endif /* CONFIG_USB_LIBUSUAL */

/*
 * Find me a way to identify "next free minor" for add_disk(),
 * and the array disappears the next day. However, the number of
 * hosts has something to do with the naming and /proc/partitions.
 * This has to be thought out in detail before changing.
 * If UB_MAX_HOST was 1000, we'd use a bitmap. Or a better data structure.
 */
#define UB_MAX_HOSTS  26
static char ub_hostv[UB_MAX_HOSTS];

#define UB_QLOCK_NUM 5
static spinlock_t ub_qlockv[UB_QLOCK_NUM];
static int ub_qlock_next = 0;

static DEFINE_SPINLOCK(ub_lock);	/* Locks globals and ->openc */

/*
 * The id allocator.
 *
 * This also stores the host for indexing by minor, which is somewhat dirty.
 */
static int ub_id_get(void)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ub_lock, flags);
	for (i = 0; i < UB_MAX_HOSTS; i++) {
		if (ub_hostv[i] == 0) {
			ub_hostv[i] = 1;
			spin_unlock_irqrestore(&ub_lock, flags);
			return i;
		}
	}
	spin_unlock_irqrestore(&ub_lock, flags);
	return -1;
}

static void ub_id_put(int id)
{
	unsigned long flags;

	if (id < 0 || id >= UB_MAX_HOSTS) {
		printk(KERN_ERR DRV_NAME ": bad host ID %d\n", id);
		return;
	}

	spin_lock_irqsave(&ub_lock, flags);
	if (ub_hostv[id] == 0) {
		spin_unlock_irqrestore(&ub_lock, flags);
		printk(KERN_ERR DRV_NAME ": freeing free host ID %d\n", id);
		return;
	}
	ub_hostv[id] = 0;
	spin_unlock_irqrestore(&ub_lock, flags);
}

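/*
 * A sketch of the bitmap variant that the comment above alludes to, for
 * the hypothetical case of a large UB_MAX_HOSTS. Not used anywhere;
 * ub_id_get() above is the real allocator. "example_" names are ours.
 */
#if 0
static DECLARE_BITMAP(example_hostmap, UB_MAX_HOSTS);

static int example_id_get(void)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ub_lock, flags);
	i = find_first_zero_bit(example_hostmap, UB_MAX_HOSTS);
	if (i < UB_MAX_HOSTS)
		__set_bit(i, example_hostmap);
	spin_unlock_irqrestore(&ub_lock, flags);
	return (i < UB_MAX_HOSTS) ? i : -1;
}
#endif
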
/*
 * This is necessitated by the fact that blk_cleanup_queue does not
 * necessarily destroy the queue. Instead, it may merely decrease q->refcnt.
 * Since our blk_init_queue() passes a spinlock common with ub_dev,
 * we have lifetime issues when ub_cleanup frees ub_dev.
 */
static spinlock_t *ub_next_lock(void)
{
	unsigned long flags;
	spinlock_t *ret;

	spin_lock_irqsave(&ub_lock, flags);
	ret = &ub_qlockv[ub_qlock_next];
	ub_qlock_next = (ub_qlock_next + 1) % UB_QLOCK_NUM;
	spin_unlock_irqrestore(&ub_lock, flags);
	return ret;
}

/*
 * Downcount for deallocation. This rides on two assumptions:
 *  - once something is poisoned, its refcount cannot grow
 *  - opens cannot happen at this time (del_gendisk was done)
 * If the above is true, we can drop the lock, which we need for
 * blk_cleanup_queue(): the silly thing may attempt to sleep.
 * [Actually, it never needs to sleep for us, but it calls might_sleep()]
 */
static void ub_put(struct ub_dev *sc)
{
	unsigned long flags;

	spin_lock_irqsave(&ub_lock, flags);
	--sc->openc;
	if (sc->openc == 0 && atomic_read(&sc->poison)) {
		spin_unlock_irqrestore(&ub_lock, flags);
		ub_cleanup(sc);
	} else {
		spin_unlock_irqrestore(&ub_lock, flags);
	}
}

/*
 * Final cleanup and deallocation.
 */
static void ub_cleanup(struct ub_dev *sc)
{
	struct list_head *p;
	struct ub_lun *lun;
	struct request_queue *q;

	while (!list_empty(&sc->luns)) {
		p = sc->luns.next;
		lun = list_entry(p, struct ub_lun, link);
		list_del(p);

		/* I don't think queue can be NULL. But... Stolen from sx8.c */
		if ((q = lun->disk->queue) != NULL)
			blk_cleanup_queue(q);
		/*
		 * If we zero disk->private_data BEFORE put_disk, we have
		 * to check for NULL all over the place in open, release,
		 * check_media and revalidate, because the block level
		 * semaphore is well inside the put_disk.
		 * But we cannot zero after the call, because *disk is gone.
		 * The sd.c is blatantly racy in this area.
		 */
		/* disk->private_data = NULL; */
		put_disk(lun->disk);
		lun->disk = NULL;

		ub_id_put(lun->id);
		kfree(lun);
	}

	usb_set_intfdata(sc->intf, NULL);
	usb_put_intf(sc->intf);
	usb_put_dev(sc->dev);
	kfree(sc);
}

/*
 * The "command allocator".
 */
static struct ub_scsi_cmd *ub_get_cmd(struct ub_lun *lun)
{
	struct ub_scsi_cmd *ret;

	if (lun->cmda[0])
		return NULL;
	ret = &lun->cmdv[0];
	lun->cmda[0] = 1;
	return ret;
}

static void ub_put_cmd(struct ub_lun *lun, struct ub_scsi_cmd *cmd)
{
	if (cmd != &lun->cmdv[0]) {
		printk(KERN_WARNING "%s: releasing a foreign cmd %p\n",
		    lun->name, cmd);
		return;
	}
	if (!lun->cmda[0]) {
		printk(KERN_WARNING "%s: releasing a free cmd\n", lun->name);
		return;
	}
	lun->cmda[0] = 0;
}

/*
 * The command queue.
 */
static void ub_cmdq_add(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct ub_scsi_cmd_queue *t = &sc->cmd_queue;

	if (t->qlen++ == 0) {
		t->head = cmd;
		t->tail = cmd;
	} else {
		t->tail->next = cmd;
		t->tail = cmd;
	}

	if (t->qlen > t->qmax)
		t->qmax = t->qlen;
}

static void ub_cmdq_insert(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct ub_scsi_cmd_queue *t = &sc->cmd_queue;

	if (t->qlen++ == 0) {
		t->head = cmd;
		t->tail = cmd;
	} else {
		cmd->next = t->head;
		t->head = cmd;
	}

	if (t->qlen > t->qmax)
		t->qmax = t->qlen;
}

static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc)
{
	struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
	struct ub_scsi_cmd *cmd;

	if (t->qlen == 0)
		return NULL;
	if (--t->qlen == 0)
		t->tail = NULL;
	cmd = t->head;
	t->head = cmd->next;
	cmd->next = NULL;
	return cmd;
}

#define ub_cmdq_peek(sc)  ((sc)->cmd_queue.head)

/*
 * The request function is our main entry point
 */

static void ub_request_fn(struct request_queue *q)
{
	struct ub_lun *lun = q->queuedata;
	struct request *rq;

	while ((rq = blk_peek_request(q)) != NULL) {
		if (ub_request_fn_1(lun, rq) != 0) {
			blk_stop_queue(q);
			break;
		}
	}
}

static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
{
	struct ub_dev *sc = lun->udev;
	struct ub_scsi_cmd *cmd;
	struct ub_request *urq;
	int n_elem;

	if (atomic_read(&sc->poison)) {
		blk_start_request(rq);
		ub_end_rq(rq, DID_NO_CONNECT << 16);
		return 0;
	}

	if (lun->changed && !blk_pc_request(rq)) {
		blk_start_request(rq);
		ub_end_rq(rq, SAM_STAT_CHECK_CONDITION);
		return 0;
	}

	if (lun->urq.rq != NULL)
		return -1;
	if ((cmd = ub_get_cmd(lun)) == NULL)
		return -1;
	memset(cmd, 0, sizeof(struct ub_scsi_cmd));

	blk_start_request(rq);

	urq = &lun->urq;
	memset(urq, 0, sizeof(struct ub_request));
	urq->rq = rq;

	/*
	 * get scatterlist from block layer
	 */
	sg_init_table(&urq->sgv[0], UB_MAX_REQ_SG);
	n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]);
	if (n_elem < 0) {
		/* Impossible, because blk_rq_map_sg should not hit ENOMEM. */
		printk(KERN_INFO "%s: failed request map (%d)\n",
		    lun->name, n_elem);
		goto drop;
	}
	if (n_elem > UB_MAX_REQ_SG) {	/* Paranoia */
		printk(KERN_WARNING "%s: request with %d segments\n",
		    lun->name, n_elem);
		goto drop;
	}
	urq->nsg = n_elem;

	if (blk_pc_request(rq)) {
		ub_cmd_build_packet(sc, lun, cmd, urq);
	} else {
		ub_cmd_build_block(sc, lun, cmd, urq);
	}
	cmd->state = UB_CMDST_INIT;
	cmd->lun = lun;
	cmd->done = ub_rw_cmd_done;
	cmd->back = urq;

	cmd->tag = sc->tagcnt++;
	if (ub_submit_scsi(sc, cmd) != 0)
		goto drop;

	return 0;

drop:
	ub_put_cmd(lun, cmd);
	ub_end_rq(rq, DID_ERROR << 16);
	return 0;
}

static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq)
{
	struct request *rq = urq->rq;
	unsigned int block, nblks;

	if (rq_data_dir(rq) == WRITE)
		cmd->dir = UB_DIR_WRITE;
	else
		cmd->dir = UB_DIR_READ;

	cmd->nsg = urq->nsg;
	memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);

	/*
	 * build the command
	 *
	 * The call to blk_queue_logical_block_size() guarantees that request
	 * is aligned, but it is given in terms of 512 byte units, always.
	 */
	block = blk_rq_pos(rq) >> lun->capacity.bshift;
	nblks = blk_rq_sectors(rq) >> lun->capacity.bshift;

	cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
	/* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
	cmd->cdb[2] = block >> 24;
	cmd->cdb[3] = block >> 16;
	cmd->cdb[4] = block >> 8;
	cmd->cdb[5] = block;
	cmd->cdb[7] = nblks >> 8;
	cmd->cdb[8] = nblks;
	cmd->cdb_len = 10;

	cmd->len = blk_rq_bytes(rq);
}

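/*
 * A worked example of the packing above: block 0x00012345 with nblks 8
 * yields cdb[2..5] = 00 01 23 45 and cdb[7..8] = 00 08, i.e. the
 * big-endian LBA and transfer length that READ(10)/WRITE(10) expect.
 */
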
static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq)
{
	struct request *rq = urq->rq;

	if (blk_rq_bytes(rq) == 0) {
		cmd->dir = UB_DIR_NONE;
	} else {
		if (rq_data_dir(rq) == WRITE)
			cmd->dir = UB_DIR_WRITE;
		else
			cmd->dir = UB_DIR_READ;
	}

	cmd->nsg = urq->nsg;
	memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);

	memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
	cmd->cdb_len = rq->cmd_len;

	cmd->len = blk_rq_bytes(rq);

	/*
	 * To reapply this to every URB is not as incorrect as it looks.
	 * In return, we avoid any complicated tracking calculations.
	 */
	cmd->timeo = rq->timeout;
}

static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct ub_lun *lun = cmd->lun;
	struct ub_request *urq = cmd->back;
	struct request *rq;
	unsigned int scsi_status;

	rq = urq->rq;

	if (cmd->error == 0) {
		if (blk_pc_request(rq)) {
			if (cmd->act_len >= rq->resid_len)
				rq->resid_len = 0;
			else
				rq->resid_len -= cmd->act_len;
			scsi_status = 0;
		} else {
			if (cmd->act_len != cmd->len) {
				scsi_status = SAM_STAT_CHECK_CONDITION;
			} else {
				scsi_status = 0;
			}
		}
	} else {
		if (blk_pc_request(rq)) {
			/* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
			memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
			rq->sense_len = UB_SENSE_SIZE;
			if (sc->top_sense[0] != 0)
				scsi_status = SAM_STAT_CHECK_CONDITION;
			else
				scsi_status = DID_ERROR << 16;
		} else {
			if (cmd->error == -EIO &&
			    (cmd->key == 0 ||
			     cmd->key == MEDIUM_ERROR ||
			     cmd->key == UNIT_ATTENTION)) {
				if (ub_rw_cmd_retry(sc, lun, urq, cmd) == 0)
					return;
			}
			scsi_status = SAM_STAT_CHECK_CONDITION;
		}
	}

	urq->rq = NULL;

	ub_put_cmd(lun, cmd);
	ub_end_rq(rq, scsi_status);
	blk_start_queue(lun->disk->queue);
}

static void ub_end_rq(struct request *rq, unsigned int scsi_status)
{
	int error;

	if (scsi_status == 0) {
		error = 0;
	} else {
		error = -EIO;
		rq->errors = scsi_status;
	}
	__blk_end_request_all(rq, error);
}

static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_request *urq, struct ub_scsi_cmd *cmd)
{

	if (atomic_read(&sc->poison))
		return -ENXIO;

	ub_reset_enter(sc, urq->current_try);

	if (urq->current_try >= 3)
		return -EIO;
	urq->current_try++;

	/* Remove this if anyone complains of flooding. */
	printk(KERN_DEBUG "%s: dir %c len/act %d/%d "
	    "[sense %x %02x %02x] retry %d\n",
	    sc->name, UB_DIR_CHAR(cmd->dir), cmd->len, cmd->act_len,
	    cmd->key, cmd->asc, cmd->ascq, urq->current_try);

	memset(cmd, 0, sizeof(struct ub_scsi_cmd));
	ub_cmd_build_block(sc, lun, cmd, urq);

	cmd->state = UB_CMDST_INIT;
	cmd->lun = lun;
	cmd->done = ub_rw_cmd_done;
	cmd->back = urq;

	cmd->tag = sc->tagcnt++;

#if 0 /* Wasteful */
	return ub_submit_scsi(sc, cmd);
#else
	ub_cmdq_add(sc, cmd);
	return 0;
#endif
}

/*
 * Submit a regular SCSI operation (not an auto-sense).
 *
 * The Iron Law of Good Submit Routine is:
 * Zero return - callback is done, Nonzero return - callback is not done.
 * No exceptions.
 *
 * Host is assumed locked.
 */
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{

	if (cmd->state != UB_CMDST_INIT ||
	    (cmd->dir != UB_DIR_NONE && cmd->len == 0)) {
		return -EINVAL;
	}

	ub_cmdq_add(sc, cmd);
	/*
	 * We can call ub_scsi_dispatch(sc) right away here, but it's a little
	 * safer to jump to a tasklet, in case upper layers do something silly.
	 */
	tasklet_schedule(&sc->tasklet);
	return 0;
}

/*
 * Submit the first URB for the queued command.
 * This function does not deal with queueing in any way.
 */
static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct bulk_cb_wrap *bcb;
	int rc;

	bcb = &sc->work_bcb;

	/*
	 * ``If the allocation length is eighteen or greater, and a device
	 * server returns less than eighteen bytes of data, the application
	 * client should assume that the bytes not transferred would have been
	 * zeroes had the device server returned those bytes.''
	 *
	 * We zero sense for all commands so that when a packet request
	 * fails it does not return a stale sense.
	 */
	memset(&sc->top_sense, 0, UB_SENSE_SIZE);

	/* set up the command wrapper */
	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
	bcb->Tag = cmd->tag;		/* Endianness is not important */
	bcb->DataTransferLength = cpu_to_le32(cmd->len);
	bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0;
	bcb->Lun = (cmd->lun != NULL) ? cmd->lun->num : 0;
	bcb->Length = cmd->cdb_len;

	/* copy the command payload */
	memcpy(bcb->CDB, cmd->cdb, UB_MAX_CDB_SIZE);

	UB_INIT_COMPLETION(sc->work_done);

	sc->last_pipe = sc->send_bulk_pipe;
	usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
	    bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
		/* XXX Clear stalls */
		ub_complete(&sc->work_done);
		return rc;
	}

	sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
	add_timer(&sc->work_timer);

	cmd->state = UB_CMDST_CMD;
	return 0;
}

/*
 * Timeout handler.
 */
static void ub_urb_timeout(unsigned long arg)
{
	struct ub_dev *sc = (struct ub_dev *) arg;
	unsigned long flags;

	spin_lock_irqsave(sc->lock, flags);
	if (!ub_is_completed(&sc->work_done))
		usb_unlink_urb(&sc->work_urb);
	spin_unlock_irqrestore(sc->lock, flags);
}

/*
 * Completion routine for the work URB.
 *
 * This can be called directly from usb_submit_urb (while we have
 * the sc->lock taken) and from an interrupt (while we do NOT have
 * the sc->lock taken). Therefore, bounce this off to a tasklet.
 */
static void ub_urb_complete(struct urb *urb)
{
	struct ub_dev *sc = urb->context;

	ub_complete(&sc->work_done);
	tasklet_schedule(&sc->tasklet);
}

static void ub_scsi_action(unsigned long _dev)
{
	struct ub_dev *sc = (struct ub_dev *) _dev;
	unsigned long flags;

	spin_lock_irqsave(sc->lock, flags);
	ub_scsi_dispatch(sc);
	spin_unlock_irqrestore(sc->lock, flags);
}

static void ub_scsi_dispatch(struct ub_dev *sc)
{
	struct ub_scsi_cmd *cmd;
	int rc;

	while (!sc->reset && (cmd = ub_cmdq_peek(sc)) != NULL) {
		if (cmd->state == UB_CMDST_DONE) {
			ub_cmdq_pop(sc);
			(*cmd->done)(sc, cmd);
		} else if (cmd->state == UB_CMDST_INIT) {
			if ((rc = ub_scsi_cmd_start(sc, cmd)) == 0)
				break;
			cmd->error = rc;
			cmd->state = UB_CMDST_DONE;
		} else {
			if (!ub_is_completed(&sc->work_done))
				break;
			del_timer(&sc->work_timer);
			ub_scsi_urb_compl(sc, cmd);
		}
	}
}

static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct urb *urb = &sc->work_urb;
	struct bulk_cs_wrap *bcs;
	int endp;
	int len;
	int rc;

	if (atomic_read(&sc->poison)) {
		ub_state_done(sc, cmd, -ENODEV);
		return;
	}

	endp = usb_pipeendpoint(sc->last_pipe);
	if (usb_pipein(sc->last_pipe))
		endp |= USB_DIR_IN;

	if (cmd->state == UB_CMDST_CLEAR) {
		if (urb->status == -EPIPE) {
			/*
			 * STALL while clearing STALL.
			 * The control pipe clears itself - nothing to do.
			 */
			printk(KERN_NOTICE "%s: stall on control pipe\n",
			    sc->name);
			goto Bad_End;
		}

		/*
		 * We ignore the result for the halt clear.
		 */

		usb_reset_endpoint(sc->dev, endp);

		ub_state_sense(sc, cmd);

	} else if (cmd->state == UB_CMDST_CLR2STS) {
		if (urb->status == -EPIPE) {
			printk(KERN_NOTICE "%s: stall on control pipe\n",
			    sc->name);
			goto Bad_End;
		}

		/*
		 * We ignore the result for the halt clear.
		 */

		usb_reset_endpoint(sc->dev, endp);

		ub_state_stat(sc, cmd);

	} else if (cmd->state == UB_CMDST_CLRRS) {
		if (urb->status == -EPIPE) {
			printk(KERN_NOTICE "%s: stall on control pipe\n",
			    sc->name);
			goto Bad_End;
		}

		/*
		 * We ignore the result for the halt clear.
		 */

		usb_reset_endpoint(sc->dev, endp);

		ub_state_stat_counted(sc, cmd);

	} else if (cmd->state == UB_CMDST_CMD) {
		switch (urb->status) {
		case 0:
			break;
		case -EOVERFLOW:
			goto Bad_End;
		case -EPIPE:
			rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
			if (rc != 0) {
				printk(KERN_NOTICE "%s: "
				    "unable to submit clear (%d)\n",
				    sc->name, rc);
				/*
				 * This is typically ENOMEM or some other such shit.
				 * Retrying is pointless. Just do Bad End on it...
				 */
				ub_state_done(sc, cmd, rc);
				return;
			}
			cmd->state = UB_CMDST_CLEAR;
			return;
		case -ESHUTDOWN:	/* unplug */
		case -EILSEQ:		/* unplug timeout on uhci */
			ub_state_done(sc, cmd, -ENODEV);
			return;
		default:
			goto Bad_End;
		}
		if (urb->actual_length != US_BULK_CB_WRAP_LEN) {
			goto Bad_End;
		}

		if (cmd->dir == UB_DIR_NONE || cmd->nsg < 1) {
			ub_state_stat(sc, cmd);
			return;
		}

		// udelay(125);		// usb-storage has this
		ub_data_start(sc, cmd);

	} else if (cmd->state == UB_CMDST_DATA) {
		if (urb->status == -EPIPE) {
			rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
			if (rc != 0) {
				printk(KERN_NOTICE "%s: "
				    "unable to submit clear (%d)\n",
				    sc->name, rc);
				ub_state_done(sc, cmd, rc);
				return;
			}
			cmd->state = UB_CMDST_CLR2STS;
			return;
		}
		if (urb->status == -EOVERFLOW) {
			/*
			 * A babble? Failure, but we must transfer CSW now.
			 */
			cmd->error = -EOVERFLOW;	/* A cheap trick... */
			ub_state_stat(sc, cmd);
			return;
		}

		if (cmd->dir == UB_DIR_WRITE) {
			/*
			 * Do not continue writes in case of a failure.
			 * Doing so would cause sectors to be mixed up,
			 * which is worse than sectors lost.
			 *
			 * We must try to read the CSW, or many devices
			 * get confused.
			 */
			len = urb->actual_length;
			if (urb->status != 0 ||
			    len != cmd->sgv[cmd->current_sg].length) {
				cmd->act_len += len;

				cmd->error = -EIO;
				ub_state_stat(sc, cmd);
				return;
			}

		} else {
			/*
			 * If an error occurs on read, we record it, and
			 * continue to fetch data in order to avoid bubble.
			 *
			 * As a small shortcut, we stop if we detect that
			 * a CSW got mixed into the data.
			 */
			if (urb->status != 0)
				cmd->error = -EIO;

			len = urb->actual_length;
			if (urb->status != 0 ||
			    len != cmd->sgv[cmd->current_sg].length) {
				if ((len & 0x1FF) == US_BULK_CS_WRAP_LEN)
					goto Bad_End;
			}
		}

		cmd->act_len += urb->actual_length;

		if (++cmd->current_sg < cmd->nsg) {
			ub_data_start(sc, cmd);
			return;
		}
		ub_state_stat(sc, cmd);

	} else if (cmd->state == UB_CMDST_STAT) {
		if (urb->status == -EPIPE) {
			rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
			if (rc != 0) {
				printk(KERN_NOTICE "%s: "
				    "unable to submit clear (%d)\n",
				    sc->name, rc);
				ub_state_done(sc, cmd, rc);
				return;
			}

			/*
			 * Having a stall when getting CSW is an error, so
			 * make sure upper levels are not oblivious to it.
			 */
			cmd->error = -EIO;		/* A cheap trick... */

			cmd->state = UB_CMDST_CLRRS;
			return;
		}

		/* Catch everything, including -EOVERFLOW and other nasties. */
		if (urb->status != 0)
			goto Bad_End;

		if (urb->actual_length == 0) {
			ub_state_stat_counted(sc, cmd);
			return;
		}

		/*
		 * Check the returned Bulk protocol status.
		 * The status block has to be validated first.
		 */

		bcs = &sc->work_bcs;

		if (sc->signature == cpu_to_le32(0)) {
			/*
			 * This is the first reply, so do not perform the check.
			 * Instead, remember the signature the device uses
			 * for future checks. But do not allow a nul.
			 */
			sc->signature = bcs->Signature;
			if (sc->signature == cpu_to_le32(0)) {
				ub_state_stat_counted(sc, cmd);
				return;
			}
		} else {
			if (bcs->Signature != sc->signature) {
				ub_state_stat_counted(sc, cmd);
				return;
			}
		}

		if (bcs->Tag != cmd->tag) {
			/*
			 * This usually happens when we disagree with the
			 * device's microcode about something. For instance,
			 * a few of them throw this after timeouts. They buffer
			 * commands and reply to commands we timed out before.
			 * Without flushing these replies we loop forever.
			 */
			ub_state_stat_counted(sc, cmd);
			return;
		}

		if (!sc->bad_resid) {
			len = le32_to_cpu(bcs->Residue);
			if (len != cmd->len - cmd->act_len) {
				/*
				 * Only start ignoring if this cmd ended well.
				 */
				if (cmd->len == cmd->act_len) {
					printk(KERN_NOTICE "%s: "
					    "bad residual %d of %d, ignoring\n",
					    sc->name, len, cmd->len);
					sc->bad_resid = 1;
				}
			}
		}

		switch (bcs->Status) {
		case US_BULK_STAT_OK:
			break;
		case US_BULK_STAT_FAIL:
			ub_state_sense(sc, cmd);
			return;
		case US_BULK_STAT_PHASE:
			goto Bad_End;
		default:
			printk(KERN_INFO "%s: unknown CSW status 0x%x\n",
			    sc->name, bcs->Status);
			ub_state_done(sc, cmd, -EINVAL);
			return;
		}

		/* Not zeroing error to preserve a babble indicator */
		if (cmd->error != 0) {
			ub_state_sense(sc, cmd);
			return;
		}
		cmd->state = UB_CMDST_DONE;
		ub_cmdq_pop(sc);
		(*cmd->done)(sc, cmd);

	} else if (cmd->state == UB_CMDST_SENSE) {
		ub_state_done(sc, cmd, -EIO);

	} else {
		printk(KERN_WARNING "%s: wrong command state %d\n",
		    sc->name, cmd->state);
		ub_state_done(sc, cmd, -EINVAL);
		return;
	}
	return;

Bad_End: /* Little Excel is dead */
	ub_state_done(sc, cmd, -EIO);
}

/*
 * Factorization helper for the command state machine:
 * Initiate a data segment transfer.
 */
static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct scatterlist *sg = &cmd->sgv[cmd->current_sg];
	int pipe;
	int rc;

	UB_INIT_COMPLETION(sc->work_done);

	if (cmd->dir == UB_DIR_READ)
		pipe = sc->recv_bulk_pipe;
	else
		pipe = sc->send_bulk_pipe;
	sc->last_pipe = pipe;
	usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, sg_virt(sg),
	    sg->length, ub_urb_complete, sc);

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
		/* XXX Clear stalls */
		ub_complete(&sc->work_done);
		ub_state_done(sc, cmd, rc);
		return;
	}

	if (cmd->timeo)
		sc->work_timer.expires = jiffies + cmd->timeo;
	else
		sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT;
	add_timer(&sc->work_timer);

	cmd->state = UB_CMDST_DATA;
}

1345 | /* |
1346 | * Factorization helper for the command state machine: | |
1347 | * Finish the command. | |
1348 | */ | |
1349 | static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc) | |
1350 | { | |
1351 | ||
1352 | cmd->error = rc; | |
1353 | cmd->state = UB_CMDST_DONE; | |
1da177e4 LT |
1354 | ub_cmdq_pop(sc); |
1355 | (*cmd->done)(sc, cmd); | |
1356 | } | |
1357 | ||
1358 | /* | |
1359 | * Factorization helper for the command state machine: | |
1360 | * Submit a CSW read. | |
1361 | */ | |
1872bceb | 1362 | static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd) |
1da177e4 LT |
1363 | { |
1364 | int rc; | |
1365 | ||
1366 | UB_INIT_COMPLETION(sc->work_done); | |
1367 | ||
1368 | sc->last_pipe = sc->recv_bulk_pipe; | |
1369 | usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe, | |
1370 | &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc); | |
1da177e4 LT |
1371 | |
1372 | if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { | |
1373 | /* XXX Clear stalls */ | |
1da177e4 LT |
1374 | ub_complete(&sc->work_done); |
1375 | ub_state_done(sc, cmd, rc); | |
1872bceb | 1376 | return -1; |
1da177e4 LT |
1377 | } |
1378 | ||
2c51ae70 PZ |
1379 | if (cmd->timeo) |
1380 | sc->work_timer.expires = jiffies + cmd->timeo; | |
1381 | else | |
1382 | sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT; | |
1da177e4 | 1383 | add_timer(&sc->work_timer); |
1872bceb | 1384 | return 0; |
1da177e4 LT |
1385 | } |
1386 | ||
1387 | /* | |
1388 | * Factorization helper for the command state machine: | |
1389 | * Submit a CSW read and go to STAT state. | |
1390 | */ | |
1391 | static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |
1392 | { | |
1872bceb PZ |
1393 | |
1394 | if (__ub_state_stat(sc, cmd) != 0) | |
1395 | return; | |
1da177e4 LT |
1396 | |
1397 | cmd->stat_count = 0; | |
1398 | cmd->state = UB_CMDST_STAT; | |
1872bceb PZ |
1399 | } |
1400 | ||
1401 | /* | |
1402 | * Factorization helper for the command state machine: | |
1403 | * Submit a CSW read and go to STAT state with counter (along [C] path). | |
1404 | */ | |
1405 | static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |
1406 | { | |
1407 | ||
1408 | if (++cmd->stat_count >= 4) { | |
1409 | ub_state_sense(sc, cmd); | |
1410 | return; | |
1411 | } | |
1412 | ||
1413 | if (__ub_state_stat(sc, cmd) != 0) | |
1414 | return; | |
1415 | ||
1416 | cmd->state = UB_CMDST_STAT; | |
1da177e4 LT |
1417 | } |
1418 | ||
1419 | /* | |
1420 | * Factorization helper for the command state machine: | |
1421 | * Submit a REQUEST SENSE and go to SENSE state. | |
1422 | */ | |
1423 | static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |
1424 | { | |
1425 | struct ub_scsi_cmd *scmd; | |
a1cf96ef | 1426 | struct scatterlist *sg; |
1da177e4 LT |
1427 | int rc; |
1428 | ||
1429 | if (cmd->cdb[0] == REQUEST_SENSE) { | |
1430 | rc = -EPIPE; | |
1431 | goto error; | |
1432 | } | |
1433 | ||
1434 | scmd = &sc->top_rqs_cmd; | |
a1cf96ef | 1435 | memset(scmd, 0, sizeof(struct ub_scsi_cmd)); |
1da177e4 LT |
1436 | scmd->cdb[0] = REQUEST_SENSE; |
1437 | scmd->cdb[4] = UB_SENSE_SIZE; | |
1438 | scmd->cdb_len = 6; | |
1439 | scmd->dir = UB_DIR_READ; | |
1440 | scmd->state = UB_CMDST_INIT; | |
a1cf96ef PZ |
1441 | scmd->nsg = 1; |
1442 | sg = &scmd->sgv[0]; | |
4f33a9d9 | 1443 | sg_init_table(sg, UB_MAX_REQ_SG); |
642f1490 JA |
1444 | sg_set_page(sg, virt_to_page(sc->top_sense), UB_SENSE_SIZE, |
1445 | (unsigned long)sc->top_sense & (PAGE_SIZE-1)); | |
1da177e4 | 1446 | scmd->len = UB_SENSE_SIZE; |
f4800078 | 1447 | scmd->lun = cmd->lun; |
1da177e4 LT |
1448 | scmd->done = ub_top_sense_done; |
1449 | scmd->back = cmd; | |
1450 | ||
1451 | scmd->tag = sc->tagcnt++; | |
1452 | ||
1453 | cmd->state = UB_CMDST_SENSE; | |
1da177e4 LT |
1454 | |
1455 | ub_cmdq_insert(sc, scmd); | |
1456 | return; | |
1457 | ||
1458 | error: | |
1459 | ub_state_done(sc, cmd, rc); | |
1460 | } | |
1461 | ||
1462 | /* | |
1463 | * A helper for the command's state machine: | |
1464 | * Submit a stall clear. | |
1465 | */ | |
1466 | static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd, | |
1467 | int stalled_pipe) | |
1468 | { | |
1469 | int endp; | |
1470 | struct usb_ctrlrequest *cr; | |
1471 | int rc; | |
1472 | ||
1473 | endp = usb_pipeendpoint(stalled_pipe); | |
1474 | if (usb_pipein (stalled_pipe)) | |
1475 | endp |= USB_DIR_IN; | |
1476 | ||
1477 | cr = &sc->work_cr; | |
1478 | cr->bRequestType = USB_RECIP_ENDPOINT; | |
1479 | cr->bRequest = USB_REQ_CLEAR_FEATURE; | |
1480 | cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT); | |
1481 | cr->wIndex = cpu_to_le16(endp); | |
1482 | cr->wLength = cpu_to_le16(0); | |
1483 | ||
1484 | UB_INIT_COMPLETION(sc->work_done); | |
1485 | ||
1486 | usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe, | |
1487 | (unsigned char*) cr, NULL, 0, ub_urb_complete, sc); | |
1da177e4 LT |
1488 | |
1489 | if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) { | |
1490 | ub_complete(&sc->work_done); | |
1491 | return rc; | |
1492 | } | |
1493 | ||
1494 | sc->work_timer.expires = jiffies + UB_CTRL_TIMEOUT; | |
1495 | add_timer(&sc->work_timer); | |
1496 | return 0; | |
1497 | } | |
1498 | ||
1499 | /* | |
1500 | */ | |
1501 | static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd) | |
1502 | { | |
a1cf96ef | 1503 | unsigned char *sense = sc->top_sense; |
1da177e4 LT |
1504 | struct ub_scsi_cmd *cmd; |
1505 | ||
1da177e4 LT |
1506 | /* |
1507 | * Find the command which triggered the unit attention or a check, | |
1508 | * save the sense into it, and advance its state machine. | |
1509 | */ | |
1510 | if ((cmd = ub_cmdq_peek(sc)) == NULL) { | |
1511 | printk(KERN_WARNING "%s: sense done while idle\n", sc->name); | |
1512 | return; | |
1513 | } | |
1514 | if (cmd != scmd->back) { | |
1515 | printk(KERN_WARNING "%s: " | |
f4800078 PZ |
1516 | "sense done for wrong command 0x%x\n", |
1517 | sc->name, cmd->tag); | |
1da177e4 LT |
1518 | return; |
1519 | } | |
1520 | if (cmd->state != UB_CMDST_SENSE) { | |
9029b174 | 1521 | printk(KERN_WARNING "%s: sense done with bad cmd state %d\n", |
f4800078 | 1522 | sc->name, cmd->state); |
1da177e4 LT |
1523 | return; |
1524 | } | |
1525 | ||
952ba222 PZ |
1526 | /* |
1527 | * Ignoring scmd->act_len, because the buffer was pre-zeroed. | |
1528 | */ | |
1da177e4 LT |
1529 | cmd->key = sense[2] & 0x0F; |
1530 | cmd->asc = sense[12]; | |
1531 | cmd->ascq = sense[13]; | |
1532 | ||
1533 | ub_scsi_urb_compl(sc, cmd); | |
1534 | } | |
1535 | ||
2c26c9e6 PZ |
1536 | /* |
1537 | * Reset management | |
1538 | */ | |
1539 | ||
2c2e4a2e | 1540 | static void ub_reset_enter(struct ub_dev *sc, int try) |
2c26c9e6 PZ |
1541 | { |
1542 | ||
1543 | if (sc->reset) { | |
1544 | /* This happens often on multi-LUN devices. */ | |
1545 | return; | |
1546 | } | |
2c2e4a2e | 1547 | sc->reset = try + 1; |
2c26c9e6 PZ |
1548 | |
1549 | #if 0 /* Not needed because the disconnect waits for us. */ | |
1550 | unsigned long flags; | |
1551 | spin_lock_irqsave(&ub_lock, flags); | |
1552 | sc->openc++; | |
1553 | spin_unlock_irqrestore(&ub_lock, flags); | |
1554 | #endif | |
1555 | ||
1556 | #if 0 /* We let them stop themselves. */ | |
2c26c9e6 | 1557 | struct ub_lun *lun; |
a69228de | 1558 | list_for_each_entry(lun, &sc->luns, link) { |
2c26c9e6 PZ |
1559 | blk_stop_queue(lun->disk->queue); |
1560 | } | |
1561 | #endif | |
1562 | ||
1563 | schedule_work(&sc->reset_work); | |
1564 | } | |

static void ub_reset_task(struct work_struct *work)
{
	struct ub_dev *sc = container_of(work, struct ub_dev, reset_work);
	unsigned long flags;
	struct ub_lun *lun;
	int rc;

	if (!sc->reset) {
		printk(KERN_WARNING "%s: Running reset unrequested\n",
		    sc->name);
		return;
	}

	if (atomic_read(&sc->poison)) {
		;
	} else if ((sc->reset & 1) == 0) {
		ub_sync_reset(sc);
		msleep(700);	/* usb-storage sleeps 6s (!) */
		ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
		ub_probe_clear_stall(sc, sc->send_bulk_pipe);
	} else if (sc->dev->actconfig->desc.bNumInterfaces != 1) {
		;
	} else {
		rc = usb_lock_device_for_reset(sc->dev, sc->intf);
		if (rc < 0) {
			printk(KERN_NOTICE
			    "%s: usb_lock_device_for_reset failed (%d)\n",
			    sc->name, rc);
		} else {
			rc = usb_reset_device(sc->dev);
			if (rc < 0) {
				printk(KERN_NOTICE "%s: "
				    "usb_reset_device failed (%d)\n",
				    sc->name, rc);
			}
			usb_unlock_device(sc->dev);
		}
	}

	/*
	 * In theory, no commands can be running while reset is active,
	 * so nobody can ask for another reset, and so we do not need any
	 * queues of resets or anything. We do need a spinlock though,
	 * to interact with the block layer.
	 */
	spin_lock_irqsave(sc->lock, flags);
	sc->reset = 0;
	tasklet_schedule(&sc->tasklet);
	list_for_each_entry(lun, &sc->luns, link) {
		blk_start_queue(lun->disk->queue);
	}
	wake_up(&sc->reset_wait);
	spin_unlock_irqrestore(sc->lock, flags);
}

/*
 * XXX Reset brackets are too much hassle to implement, so just stub them
 * in order to prevent forced unbinding (which deadlocks solid when our
 * ->disconnect method waits for the reset to complete and this kills keventd).
 *
 * XXX Tell Alan to move usb_unlock_device inside of usb_reset_device,
 * or else the post_reset is invoked, and restarts I/O on a locked device.
 */
static int ub_pre_reset(struct usb_interface *iface) {
	return 0;
}

static int ub_post_reset(struct usb_interface *iface) {
	return 0;
}

/*
 * This is called from a process context.
 */
static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun)
{

	lun->readonly = 0;	/* XXX Query this from the device */

	lun->capacity.nsec = 0;
	lun->capacity.bsize = 512;
	lun->capacity.bshift = 0;

	if (ub_sync_tur(sc, lun) != 0)
		return;			/* Not ready */
	lun->changed = 0;

	if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
		/*
		 * The retry here means something is wrong, either with the
		 * device, with the transport, or with our code.
		 * We keep this because sd.c has retries for capacity.
		 */
		if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
			lun->capacity.nsec = 0;
			lun->capacity.bsize = 512;
			lun->capacity.bshift = 0;
		}
	}
}
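
/*
 * A note on the defaults above (an observation): the capacity is reset
 * to "no media, 512-byte blocks" before the device is queried, so a LUN
 * which fails TEST_UNIT_READY is left with nsec == 0 and shows up as a
 * zero-capacity disk until a later revalidation succeeds. lun->changed
 * also stays set in that case, which keeps opens failing with -ENOMEDIUM.
 */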

/*
 * The open function.
 * This is mostly needed to keep refcounting, but also to support
 * media checks on removable media drives.
 */
static int ub_bd_open(struct block_device *bdev, fmode_t mode)
{
	struct ub_lun *lun = bdev->bd_disk->private_data;
	struct ub_dev *sc = lun->udev;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ub_lock, flags);
	if (atomic_read(&sc->poison)) {
		spin_unlock_irqrestore(&ub_lock, flags);
		return -ENXIO;
	}
	sc->openc++;
	spin_unlock_irqrestore(&ub_lock, flags);

	if (lun->removable || lun->readonly)
		check_disk_change(bdev);

	/*
	 * sd.c considers ->media_present and ->changed not equivalent,
	 * under some pretty murky conditions (a failure of READ CAPACITY).
	 * We may need it one day.
	 */
	if (lun->removable && lun->changed && !(mode & FMODE_NDELAY)) {
		rc = -ENOMEDIUM;
		goto err_open;
	}

	if (lun->readonly && (mode & FMODE_WRITE)) {
		rc = -EROFS;
		goto err_open;
	}

	return 0;

err_open:
	ub_put(sc);
	return rc;
}

/*
 */
static int ub_bd_release(struct gendisk *disk, fmode_t mode)
{
	struct ub_lun *lun = disk->private_data;
	struct ub_dev *sc = lun->udev;

	ub_put(sc);
	return 0;
}

/*
 * The ioctl interface.
 */
static int ub_bd_ioctl(struct block_device *bdev, fmode_t mode,
    unsigned int cmd, unsigned long arg)
{
	struct gendisk *disk = bdev->bd_disk;
	void __user *usermem = (void __user *) arg;

	return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem);
}

/*
 * This is called by check_disk_change if we reported a media change.
 * The main objective here is to discover the features of the media such as
 * the capacity, read-only status, etc. USB storage generally does not
 * need to be spun up, but if we needed it, this would be the place.
 *
 * This call can sleep.
 *
 * The return code is not used.
 */
static int ub_bd_revalidate(struct gendisk *disk)
{
	struct ub_lun *lun = disk->private_data;

	ub_revalidate(lun->udev, lun);

	/* XXX Support sector size switching like in sr.c */
	blk_queue_logical_block_size(disk->queue, lun->capacity.bsize);
	set_capacity(disk, lun->capacity.nsec);
	// set_disk_ro(sdkp->disk, lun->readonly);

	return 0;
}

/*
 * The check is called by the block layer to verify if the media
 * is still available. It is supposed to be harmless, lightweight and
 * non-intrusive in case the media was not changed.
 *
 * This call can sleep.
 *
 * The return code is bool!
 */
static int ub_bd_media_changed(struct gendisk *disk)
{
	struct ub_lun *lun = disk->private_data;

	if (!lun->removable)
		return 0;

	/*
	 * We clear checks after every command, so this is not as
	 * dangerous as it looks. If the TEST_UNIT_READY fails here,
	 * the device is actually not ready, and operator or software
	 * intervention is required. One dangerous item might be a drive
	 * which spins itself down, and come the time to write dirty pages,
	 * this will fail, then the block layer discards the data. Since
	 * we never spin drives up, such devices simply cannot be used
	 * with ub anyway.
	 */
	if (ub_sync_tur(lun->udev, lun) != 0) {
		lun->changed = 1;
		return 1;
	}

	return lun->changed;
}

static struct block_device_operations ub_bd_fops = {
	.owner		= THIS_MODULE,
	.open		= ub_bd_open,
	.release	= ub_bd_release,
	.locked_ioctl	= ub_bd_ioctl,
	.media_changed	= ub_bd_media_changed,
	.revalidate_disk = ub_bd_revalidate,
};

/*
 * Common ->done routine for commands executed synchronously.
 */
static void ub_probe_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
	struct completion *cop = cmd->back;
	complete(cop);
}

/*
 * Test if the device has a check condition on it, synchronously.
 */
static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun)
{
	struct ub_scsi_cmd *cmd;
	enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) };
	unsigned long flags;
	struct completion compl;
	int rc;

	init_completion(&compl);

	rc = -ENOMEM;
	if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
		goto err_alloc;

	cmd->cdb[0] = TEST_UNIT_READY;
	cmd->cdb_len = 6;
	cmd->dir = UB_DIR_NONE;
	cmd->state = UB_CMDST_INIT;
	cmd->lun = lun;			/* This may be NULL, but that's ok */
	cmd->done = ub_probe_done;
	cmd->back = &compl;

	spin_lock_irqsave(sc->lock, flags);
	cmd->tag = sc->tagcnt++;

	rc = ub_submit_scsi(sc, cmd);
	spin_unlock_irqrestore(sc->lock, flags);

	if (rc != 0)
		goto err_submit;

	wait_for_completion(&compl);

	rc = cmd->error;

	if (rc == -EIO && cmd->key != 0)	/* Retries for benh's key */
		rc = cmd->key;

err_submit:
	kfree(cmd);
err_alloc:
	return rc;
}
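
/*
 * The return convention of ub_sync_tur, spelled out for its callers
 * (inferred from the code above): 0 means the unit is ready, a negative
 * value is an errno-style transport error, and a positive value is the
 * SCSI sense key, for instance 0x6 (UNIT ATTENTION) right after a media
 * change. ub_probe relies on this when it loops until rc != 0x6.
 */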

/*
 * Read the SCSI capacity synchronously (for probing).
 */
static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_capacity *ret)
{
	struct ub_scsi_cmd *cmd;
	struct scatterlist *sg;
	char *p;
	enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 };
	unsigned long flags;
	unsigned int bsize, shift;
	unsigned long nsec;
	struct completion compl;
	int rc;

	init_completion(&compl);

	rc = -ENOMEM;
	if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
		goto err_alloc;
	p = (char *)cmd + sizeof(struct ub_scsi_cmd);

	cmd->cdb[0] = 0x25;
	cmd->cdb_len = 10;
	cmd->dir = UB_DIR_READ;
	cmd->state = UB_CMDST_INIT;
	cmd->nsg = 1;
	sg = &cmd->sgv[0];
	sg_init_table(sg, UB_MAX_REQ_SG);
	sg_set_page(sg, virt_to_page(p), 8, (unsigned long)p & (PAGE_SIZE-1));
	cmd->len = 8;
	cmd->lun = lun;
	cmd->done = ub_probe_done;
	cmd->back = &compl;

	spin_lock_irqsave(sc->lock, flags);
	cmd->tag = sc->tagcnt++;

	rc = ub_submit_scsi(sc, cmd);
	spin_unlock_irqrestore(sc->lock, flags);

	if (rc != 0)
		goto err_submit;

	wait_for_completion(&compl);

	if (cmd->error != 0) {
		rc = -EIO;
		goto err_read;
	}
	if (cmd->act_len != 8) {
		rc = -EIO;
		goto err_read;
	}

	/* sd.c special-cases sector size of 0 to mean 512. Needed? Safe? */
	nsec = be32_to_cpu(*(__be32 *)p) + 1;
	bsize = be32_to_cpu(*(__be32 *)(p + 4));
	switch (bsize) {
	case 512:	shift = 0;	break;
	case 1024:	shift = 1;	break;
	case 2048:	shift = 2;	break;
	case 4096:	shift = 3;	break;
	default:
		rc = -EDOM;
		goto err_inv_bsize;
	}

	ret->bsize = bsize;
	ret->bshift = shift;
	ret->nsec = nsec << shift;
	rc = 0;

err_inv_bsize:
err_read:
err_submit:
	kfree(cmd);
err_alloc:
	return rc;
}
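
/*
 * For reference, 0x25 is READ CAPACITY(10); its 8-byte response is
 * (a summary of the SCSI spec, not restated elsewhere in this file):
 *
 *	bytes 0..3	big-endian LBA of the last block
 *	bytes 4..7	big-endian block length in bytes
 *
 * So nsec above is last LBA + 1 device blocks, and ret->nsec = nsec << shift
 * converts that into the 512-byte sectors the block layer counts in.
 * A worked example: a device reporting last LBA 524287 with 2048-byte
 * blocks gives shift = 2 and ret->nsec = 524288 << 2 = 2097152 sectors,
 * i.e. exactly 1 GiB.
 */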

/*
 */
static void ub_probe_urb_complete(struct urb *urb)
{
	struct completion *cop = urb->context;
	complete(cop);
}

static void ub_probe_timeout(unsigned long arg)
{
	struct completion *cop = (struct completion *) arg;
	complete(cop);
}

/*
 * Reset with a Bulk reset.
 */
static int ub_sync_reset(struct ub_dev *sc)
{
	int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
	struct usb_ctrlrequest *cr;
	struct completion compl;
	struct timer_list timer;
	int rc;

	init_completion(&compl);

	cr = &sc->work_cr;
	cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
	cr->bRequest = US_BULK_RESET_REQUEST;
	cr->wValue = cpu_to_le16(0);
	cr->wIndex = cpu_to_le16(ifnum);
	cr->wLength = cpu_to_le16(0);

	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
	    (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
		printk(KERN_WARNING
		     "%s: Unable to submit a bulk reset (%d)\n", sc->name, rc);
		return rc;
	}

	init_timer(&timer);
	timer.function = ub_probe_timeout;
	timer.data = (unsigned long) &compl;
	timer.expires = jiffies + UB_CTRL_TIMEOUT;
	add_timer(&timer);

	wait_for_completion(&compl);

	del_timer_sync(&timer);
	usb_kill_urb(&sc->work_urb);

	return sc->work_urb.status;
}
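
/*
 * A note on the request above (a summary of the Bulk-Only Transport spec,
 * stated as an aside): US_BULK_RESET_REQUEST is the class-specific
 * "Bulk-Only Mass Storage Reset", a host-to-interface request with no
 * data stage. It resets the device's mass storage state machine without
 * tearing down the USB configuration; it does not clear endpoint halts,
 * which is why ub_reset_task follows it with two ub_probe_clear_stall
 * calls.
 */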

/*
 * Get the number of LUNs by way of the Bulk GetMaxLUN command.
 */
static int ub_sync_getmaxlun(struct ub_dev *sc)
{
	int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
	unsigned char *p;
	enum { ALLOC_SIZE = 1 };
	struct usb_ctrlrequest *cr;
	struct completion compl;
	struct timer_list timer;
	int nluns;
	int rc;

	init_completion(&compl);

	rc = -ENOMEM;
	if ((p = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
		goto err_alloc;
	*p = 55;

	cr = &sc->work_cr;
	cr->bRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
	cr->bRequest = US_BULK_GET_MAX_LUN;
	cr->wValue = cpu_to_le16(0);
	cr->wIndex = cpu_to_le16(ifnum);
	cr->wLength = cpu_to_le16(1);

	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe,
	    (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl);

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0)
		goto err_submit;

	init_timer(&timer);
	timer.function = ub_probe_timeout;
	timer.data = (unsigned long) &compl;
	timer.expires = jiffies + UB_CTRL_TIMEOUT;
	add_timer(&timer);

	wait_for_completion(&compl);

	del_timer_sync(&timer);
	usb_kill_urb(&sc->work_urb);

	if ((rc = sc->work_urb.status) < 0)
		goto err_io;

	if (sc->work_urb.actual_length != 1) {
		nluns = 0;
	} else {
		if ((nluns = *p) == 55) {
			nluns = 0;
		} else {
			/* GetMaxLUN returns the maximum LUN number */
			nluns += 1;
			if (nluns > UB_MAX_LUNS)
				nluns = UB_MAX_LUNS;
		}
	}

	kfree(p);
	return nluns;

err_io:
err_submit:
	kfree(p);
err_alloc:
	return rc;
}
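
/*
 * Two quirks worth spelling out (observations, not original comments):
 * GetMaxLUN returns the highest LUN number, so a device answering 0
 * still has one LUN, hence the += 1. And the buffer is pre-filled with
 * the sentinel 55 so that a device which completes the transfer without
 * writing a byte is treated like one which stalled the request; the
 * caller in ub_probe then falls back to a single LUN.
 */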

/*
 * Clear initial stalls.
 */
static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe)
{
	int endp;
	struct usb_ctrlrequest *cr;
	struct completion compl;
	struct timer_list timer;
	int rc;

	init_completion(&compl);

	endp = usb_pipeendpoint(stalled_pipe);
	if (usb_pipein(stalled_pipe))
		endp |= USB_DIR_IN;

	cr = &sc->work_cr;
	cr->bRequestType = USB_RECIP_ENDPOINT;
	cr->bRequest = USB_REQ_CLEAR_FEATURE;
	cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
	cr->wIndex = cpu_to_le16(endp);
	cr->wLength = cpu_to_le16(0);

	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
	    (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);

	if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
		printk(KERN_WARNING
		     "%s: Unable to submit a probe clear (%d)\n", sc->name, rc);
		return rc;
	}

	init_timer(&timer);
	timer.function = ub_probe_timeout;
	timer.data = (unsigned long) &compl;
	timer.expires = jiffies + UB_CTRL_TIMEOUT;
	add_timer(&timer);

	wait_for_completion(&compl);

	del_timer_sync(&timer);
	usb_kill_urb(&sc->work_urb);

	usb_reset_endpoint(sc->dev, endp);

	return 0;
}
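
/*
 * A note on the pairing above (an observation): the standard
 * CLEAR_FEATURE(ENDPOINT_HALT) request un-halts the endpoint and resets
 * the data toggle on the device side, while usb_reset_endpoint() resets
 * the host controller's view of the same endpoint, so both ends restart
 * from DATA0 and stay in sync.
 */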

/*
 * Get the pipe settings.
 */
static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev,
    struct usb_interface *intf)
{
	struct usb_host_interface *altsetting = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_in = NULL;
	struct usb_endpoint_descriptor *ep_out = NULL;
	struct usb_endpoint_descriptor *ep;
	int i;

	/*
	 * Find the endpoints we need.
	 * We are expecting a minimum of 2 endpoints - in and out (bulk).
	 * We will ignore any others.
	 */
	for (i = 0; i < altsetting->desc.bNumEndpoints; i++) {
		ep = &altsetting->endpoint[i].desc;

		/* Is it a BULK endpoint? */
		if (usb_endpoint_xfer_bulk(ep)) {
			/* BULK in or out? */
			if (usb_endpoint_dir_in(ep)) {
				if (ep_in == NULL)
					ep_in = ep;
			} else {
				if (ep_out == NULL)
					ep_out = ep;
			}
		}
	}

	if (ep_in == NULL || ep_out == NULL) {
		printk(KERN_NOTICE "%s: failed endpoint check\n", sc->name);
		return -ENODEV;
	}

	/* Calculate and store the pipe values */
	sc->send_ctrl_pipe = usb_sndctrlpipe(dev, 0);
	sc->recv_ctrl_pipe = usb_rcvctrlpipe(dev, 0);
	sc->send_bulk_pipe = usb_sndbulkpipe(dev,
	    usb_endpoint_num(ep_out));
	sc->recv_bulk_pipe = usb_rcvbulkpipe(dev,
	    usb_endpoint_num(ep_in));

	return 0;
}
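
/*
 * This matches the Bulk-Only Transport wiring (a spec summary, stated as
 * an aside): CBW, data and CSW traffic all run over the one bulk-in and
 * one bulk-out endpoint found here, while the default control endpoint 0
 * carries the class requests (Bulk reset, GetMaxLUN) and stall clearing.
 */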

/*
 * Probing is done in the process context, which allows us to cheat
 * and not build a state machine for the discovery.
 */
static int ub_probe(struct usb_interface *intf,
    const struct usb_device_id *dev_id)
{
	struct ub_dev *sc;
	int nluns;
	int rc;
	int i;

	if (usb_usual_check_type(dev_id, USB_US_TYPE_UB))
		return -ENXIO;

	rc = -ENOMEM;
	if ((sc = kzalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL)
		goto err_core;
	sc->lock = ub_next_lock();
	INIT_LIST_HEAD(&sc->luns);
	usb_init_urb(&sc->work_urb);
	tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
	atomic_set(&sc->poison, 0);
	INIT_WORK(&sc->reset_work, ub_reset_task);
	init_waitqueue_head(&sc->reset_wait);

	init_timer(&sc->work_timer);
	sc->work_timer.data = (unsigned long) sc;
	sc->work_timer.function = ub_urb_timeout;

	ub_init_completion(&sc->work_done);
	sc->work_done.done = 1;		/* A little yuk, but oh well... */

	sc->dev = interface_to_usbdev(intf);
	sc->intf = intf;
	// sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
	usb_set_intfdata(intf, sc);
	usb_get_dev(sc->dev);
	/*
	 * Since we give the interface struct to the block level through
	 * disk->driverfs_dev, we have to pin it. Otherwise, block_uevent
	 * oopses on close after a disconnect (kernels 2.6.16 and up).
	 */
	usb_get_intf(sc->intf);

	snprintf(sc->name, 12, DRV_NAME "(%d.%d)",
	    sc->dev->bus->busnum, sc->dev->devnum);

	/* XXX Verify that we can handle the device (from descriptors) */

	if (ub_get_pipes(sc, sc->dev, intf) != 0)
		goto err_dev_desc;

	/*
	 * At this point, all USB initialization is done, do upper layer.
	 * We really hate halfway initialized structures, so from the
	 * invariants perspective, this ub_dev is fully constructed at
	 * this point.
	 */

	/*
	 * This is needed to clear toggles. It is a problem only if we do
	 * `rmmod ub && modprobe ub` without disconnects, but we like that.
	 */
#if 0 /* iPod Mini fails if we do this (big white iPod works) */
	ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
	ub_probe_clear_stall(sc, sc->send_bulk_pipe);
#endif

	/*
	 * The way this is used by the startup code is a little specific.
	 * A SCSI check causes a USB stall. Our common case code sees it
	 * and clears the check, after which the device is ready for use.
	 * But if a check was not present, any command other than
	 * TEST_UNIT_READY ends with a lockup (including REQUEST_SENSE).
	 *
	 * If we neglect to clear the SCSI check, the first real command fails
	 * (which is the capacity readout). We clear that and retry, but why
	 * cause spurious retries for no reason.
	 *
	 * Revalidation may start with its own TEST_UNIT_READY, but that one
	 * has to succeed, so we clear checks with an additional one here.
	 * In any case it's not our business how revalidation is implemented.
	 */
	for (i = 0; i < 3; i++) {	/* Retries for the schwag key from KS'04 */
		if ((rc = ub_sync_tur(sc, NULL)) <= 0) break;
		if (rc != 0x6) break;
		msleep(10);
	}

	nluns = 1;
	for (i = 0; i < 3; i++) {
		if ((rc = ub_sync_getmaxlun(sc)) < 0)
			break;
		if (rc != 0) {
			nluns = rc;
			break;
		}
		msleep(100);
	}

	for (i = 0; i < nluns; i++) {
		ub_probe_lun(sc, i);
	}
	return 0;

err_dev_desc:
	usb_set_intfdata(intf, NULL);
	usb_put_intf(sc->intf);
	usb_put_dev(sc->dev);
	kfree(sc);
err_core:
	return rc;
}

static int ub_probe_lun(struct ub_dev *sc, int lnum)
{
	struct ub_lun *lun;
	struct request_queue *q;
	struct gendisk *disk;
	int rc;

	rc = -ENOMEM;
	if ((lun = kzalloc(sizeof(struct ub_lun), GFP_KERNEL)) == NULL)
		goto err_alloc;
	lun->num = lnum;

	rc = -ENOSR;
	if ((lun->id = ub_id_get()) == -1)
		goto err_id;

	lun->udev = sc;

	snprintf(lun->name, 16, DRV_NAME "%c(%d.%d.%d)",
	    lun->id + 'a', sc->dev->bus->busnum, sc->dev->devnum, lun->num);

	lun->removable = 1;		/* XXX Query this from the device */
	lun->changed = 1;		/* ub_revalidate clears only */
	ub_revalidate(sc, lun);

	rc = -ENOMEM;
	if ((disk = alloc_disk(UB_PARTS_PER_LUN)) == NULL)
		goto err_diskalloc;

	sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a');
	disk->major = UB_MAJOR;
	disk->first_minor = lun->id * UB_PARTS_PER_LUN;
	disk->fops = &ub_bd_fops;
	disk->private_data = lun;
	disk->driverfs_dev = &sc->intf->dev;

	rc = -ENOMEM;
	if ((q = blk_init_queue(ub_request_fn, sc->lock)) == NULL)
		goto err_blkqinit;

	disk->queue = q;

	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
	blk_queue_max_hw_segments(q, UB_MAX_REQ_SG);
	blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
	blk_queue_segment_boundary(q, 0xffffffff);	/* Dubious. */
	blk_queue_max_sectors(q, UB_MAX_SECTORS);
	blk_queue_logical_block_size(q, lun->capacity.bsize);

	lun->disk = disk;
	q->queuedata = lun;
	list_add(&lun->link, &sc->luns);

	set_capacity(disk, lun->capacity.nsec);
	if (lun->removable)
		disk->flags |= GENHD_FL_REMOVABLE;

	add_disk(disk);

	return 0;

err_blkqinit:
	put_disk(disk);
err_diskalloc:
	ub_id_put(lun->id);
err_id:
	kfree(lun);
err_alloc:
	return rc;
}
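
/*
 * A worked example of the naming and minor math above (illustrative
 * only; UB_PARTS_PER_LUN is defined earlier in this file): the LUN with
 * id 2 becomes /dev/ubc, and with UB_PARTS_PER_LUN == 8 it would own
 * minors 16..23 under UB_MAJOR (180), leaving room for seven partitions
 * besides the whole-disk node.
 */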

static void ub_disconnect(struct usb_interface *intf)
{
	struct ub_dev *sc = usb_get_intfdata(intf);
	struct ub_lun *lun;
	unsigned long flags;

	/*
	 * Prevent ub_bd_release from pulling the rug from under us.
	 * XXX This is starting to look like a kref.
	 * XXX Why not take this ref at probe time?
	 */
	spin_lock_irqsave(&ub_lock, flags);
	sc->openc++;
	spin_unlock_irqrestore(&ub_lock, flags);

	/*
	 * Fence stall clearings, operations triggered by unlinkings and so on.
	 * We do not attempt to unlink any URBs, because we do not trust the
	 * unlink paths in HC drivers. Also, we get -84 upon disconnect anyway.
	 */
	atomic_set(&sc->poison, 1);

	/*
	 * Wait for reset to end, if any.
	 */
	wait_event(sc->reset_wait, !sc->reset);

	/*
	 * Blow away queued commands.
	 *
	 * Actually, this never works, because before we get here
	 * the HCD terminates outstanding URB(s). It causes our
	 * SCSI command queue to advance, commands fail to submit,
	 * and the whole queue drains. So, we just use this code to
	 * print warnings.
	 */
	spin_lock_irqsave(sc->lock, flags);
	{
		struct ub_scsi_cmd *cmd;
		int cnt = 0;
		while ((cmd = ub_cmdq_peek(sc)) != NULL) {
			cmd->error = -ENOTCONN;
			cmd->state = UB_CMDST_DONE;
			ub_cmdq_pop(sc);
			(*cmd->done)(sc, cmd);
			cnt++;
		}
		if (cnt != 0) {
			printk(KERN_WARNING "%s: "
			    "%d was queued after shutdown\n", sc->name, cnt);
		}
	}
	spin_unlock_irqrestore(sc->lock, flags);

	/*
	 * Unregister the upper layer.
	 */
	list_for_each_entry(lun, &sc->luns, link) {
		del_gendisk(lun->disk);
		/*
		 * I wish I could do:
		 *    queue_flag_set(QUEUE_FLAG_DEAD, q);
		 * As it is, we rely on our internal poisoning and let
		 * the upper levels spin furiously, failing all the I/O.
		 */
	}

	/*
	 * Testing for -EINPROGRESS is always a bug, so we are bending
	 * the rules a little.
	 */
	spin_lock_irqsave(sc->lock, flags);
	if (sc->work_urb.status == -EINPROGRESS) {	/* janitors: ignore */
		printk(KERN_WARNING "%s: "
		    "URB is active after disconnect\n", sc->name);
	}
	spin_unlock_irqrestore(sc->lock, flags);

	/*
	 * There is virtually no chance that another CPU runs a timeout so
	 * long after ub_urb_complete should have called del_timer, but only
	 * if the HCD didn't forget to deliver a callback on unlink.
	 */
	del_timer_sync(&sc->work_timer);

	/*
	 * At this point there must be no commands coming from anyone
	 * and no URBs left in transit.
	 */

	ub_put(sc);
}

static struct usb_driver ub_driver = {
	.name =		"ub",
	.probe =	ub_probe,
	.disconnect =	ub_disconnect,
	.id_table =	ub_usb_ids,
	.pre_reset =	ub_pre_reset,
	.post_reset =	ub_post_reset,
};

static int __init ub_init(void)
{
	int rc;
	int i;

	for (i = 0; i < UB_QLOCK_NUM; i++)
		spin_lock_init(&ub_qlockv[i]);

	if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
		goto err_regblkdev;

	if ((rc = usb_register(&ub_driver)) != 0)
		goto err_register;

	usb_usual_set_present(USB_US_TYPE_UB);
	return 0;

err_register:
	unregister_blkdev(UB_MAJOR, DRV_NAME);
err_regblkdev:
	return rc;
}

static void __exit ub_exit(void)
{
	usb_deregister(&ub_driver);

	unregister_blkdev(UB_MAJOR, DRV_NAME);
	usb_usual_clear_present(USB_US_TYPE_UB);
}

module_init(ub_init);
module_exit(ub_exit);

MODULE_LICENSE("GPL");