/*
 * The low performance USB storage driver (ub).
 *
 * Copyright (c) 1999, 2000 Matthew Dharm ([email protected])
 * Copyright (C) 2004 Pete Zaitcev ([email protected])
 *
 * This work is a part of Linux kernel, is derived from it,
 * and is not licensed separately. See file COPYING for details.
 *
 * TODO (sorted by decreasing priority)
 *  -- Return sense now that rq allows it (we always auto-sense anyway).
 *  -- set readonly flag for CDs, set removable flag for CF readers
 *  -- do inquiry and verify we got a disk and not a tape (for LUN mismatch)
 *  -- verify the 13 conditions and do bulk resets
 *  -- highmem
 *  -- move top_sense and work_bcs into separate allocations (if they survive)
 *     for cache purists and esoteric architectures.
 *  -- Allocate structure for LUN 0 before the first ub_sync_tur, avoid NULL. ?
 *  -- prune comments, they are too voluminous
 *  -- Resolve XXX's
 *  -- CLEAR, CLR2STS, CLRRS seem to be ripe for refactoring.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb_usual.h>
#include <linux/blkdev.h>
#include <linux/timer.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>

#define DRV_NAME "ub"

#define UB_MAJOR 180

/*
 * The command state machine is the key model for understanding of this driver.
 *
 * The general rule is that all transitions are done towards the bottom
 * of the diagram, thus preventing any loops.
 *
 * An exception to that is how the STAT state is handled. A counter allows it
 * to be re-entered along the path marked with [C].
 *
 *     +--------+
 *     ! INIT   !
 *     +--------+
 *         !
 *        ub_scsi_cmd_start fails ->------------------------------------\
 *         !                                                            !
 *         V                                                            !
 *     +--------+                                                       !
 *     ! CMD    !                                                       !
 *     +--------+                                                       !
 *         !                              +--------+                    !
 *         was -EPIPE -->---------------->! CLEAR  !                    !
 *         !                              +--------+                    !
 *         !                                  !                         !
 *         was error -->--------------------- ! -------->\              !
 *         !                                  !          !              !
 *  /--<-- cmd->dir == NONE ?                 !          !              !
 *  !      !                                  !          !              !
 *  !      V                                  !          !              !
 *  !  +--------+                             !          !              !
 *  !  ! DATA   !                             !          !              !
 *  !  +--------+                             !          !              !
 *  !      !                 +---------+      !          !              !
 *  !      was -EPIPE -->--->! CLR2STS !      !          !              !
 *  !      !                 +---------+      !          !              !
 *  !      !                      !           !          !              !
 *  !      !                      was error --> ! -------->\            !
 *  !      was error -->-------- ! ---------- ! ----------->\           !
 *  !      !                      !           !          ! ! !          !
 *  !      V                      !           !          ! ! !          !
 *  \--->+--------+               !           !          ! ! !          !
 *       ! STAT   !<--------------/           !          ! ! !          !
 *  /--->+--------+                           !          ! ! !          !
 *  !        !                                !          ! ! !          !
 * [C]      was -EPIPE -->-----------\        !          ! ! !          !
 *  !        !                       !        !          ! ! !          !
 *  +<---- len == 0                  !        !          ! ! !          !
 *  !        !                       !        !          ! ! !          !
 *  !       was error -->------------!------------------------------------>\
 *  !        !                       !        !          ! ! !          !
 *  +<---- bad CSW                   !        !          ! ! !          !
 *  +<---- bad tag                   !        !          ! ! !          !
 *  !        !                       V        !          ! ! !          !
 *  !        !                  +--------+    !          ! ! !          !
 *  !        !                  ! CLRRS  !    !          ! ! !          !
 *  !        !                  +--------+    !          ! ! !          !
 *  !        !                       !        !          ! ! !          !
 *  \------- ! --------------------[C]-----\  !          ! ! !          !
 *           !                             !  !          ! ! !          !
 *          cmd->error---\     +--------+  !  !          ! ! !          !
 *           !            +--->! SENSE  !<-/--/          ! ! !          !
 *          STAT_FAIL----/     +--------+                ! ! !          !
 *           !                      !                    ! ! !          V
 *           !                      V                    ! ! !     +--------+
 *           \----------------------\------------------------------>! DONE   !
 *                                                                  +--------+
 */
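
/*
 * In the code below, the states of the diagram are the UB_CMDST_*
 * values of enum ub_scsi_cmd_state; ub_scsi_dispatch() makes the
 * transitions out of INIT and into DONE, and ub_scsi_urb_compl()
 * drives the rest as the work URB completes.
 */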

/*
 * This many LUNs per USB device.
 * Every one of them takes a host, see UB_MAX_HOSTS.
 */
#define UB_MAX_LUNS 9

/*
 */

#define UB_PARTS_PER_LUN 8

#define UB_MAX_CDB_SIZE 16      /* Corresponds to Bulk */

#define UB_SENSE_SIZE 18

/*
 */

/* command block wrapper */
struct bulk_cb_wrap {
        __le32  Signature;              /* contains 'USBC' */
        u32     Tag;                    /* unique per command id */
        __le32  DataTransferLength;     /* size of data */
        u8      Flags;                  /* direction in bit 0 */
        u8      Lun;                    /* LUN */
        u8      Length;                 /* of the CDB */
        u8      CDB[UB_MAX_CDB_SIZE];   /* max command */
};

#define US_BULK_CB_WRAP_LEN     31
#define US_BULK_CB_SIGN         0x43425355      /* spells out 'USBC' */
#define US_BULK_FLAG_IN         1
#define US_BULK_FLAG_OUT        0
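
/*
 * For illustration only (not part of the protocol structures above):
 * a CBW for a READ(10) of one 512-byte sector from LUN 0 goes on the
 * wire as these 31 little-endian bytes:
 *
 *      55 53 42 43 tt tt tt tt 00 02 00 00 80 00 0a           <- header
 *      28 00 00 00 00 00 00 00 01 00 00 00 00 00 00 00        <- CDB, padded
 *
 * that is: 'USBC', a tag (tt), DataTransferLength 512, Flags 0x80
 * (IN), Lun 0, Length 10, then the CDB padded to UB_MAX_CDB_SIZE.
 */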

/* command status wrapper */
struct bulk_cs_wrap {
        __le32  Signature;      /* should = 'USBS' */
        u32     Tag;            /* same as original command */
        __le32  Residue;        /* amount not transferred */
        u8      Status;         /* see below */
};

#define US_BULK_CS_WRAP_LEN     13
#define US_BULK_CS_SIGN         0x53425355      /* spells out 'USBS' */
#define US_BULK_STAT_OK         0
#define US_BULK_STAT_FAIL       1
#define US_BULK_STAT_PHASE      2

/* bulk-only class specific requests */
#define US_BULK_RESET_REQUEST   0xff
#define US_BULK_GET_MAX_LUN     0xfe
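
/*
 * A minimal sketch of how US_BULK_GET_MAX_LUN is used (the probe
 * code performs the real, buffered version): a class-specific IN
 * control transfer on the default pipe which returns a single byte
 * holding the highest LUN number. Here ifnum is the interface
 * number and p a one-byte DMA-able buffer.
 *
 *      rc = usb_control_msg(sc->dev, sc->recv_ctrl_pipe,
 *          US_BULK_GET_MAX_LUN,
 *          USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
 *          0, ifnum, p, 1, UB_CTRL_TIMEOUT);
 */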

/*
 */
struct ub_dev;

#define UB_MAX_REQ_SG   9       /* cdrecord requires 32KB and maybe a header */
#define UB_MAX_SECTORS 64

/*
 * A second is more than enough for a 32K transfer (UB_MAX_SECTORS)
 * even if a webcam hogs the bus, but some devices need time to spin up.
 */
#define UB_URB_TIMEOUT  (HZ*2)
#define UB_DATA_TIMEOUT (HZ*5)  /* ZIP does spin-ups in the data phase */
#define UB_STAT_TIMEOUT (HZ*5)  /* Same spinups and eject for a dataless cmd. */
#define UB_CTRL_TIMEOUT (HZ/2)  /* 500ms ought to be enough to clear a stall */

/*
 * An instance of a SCSI command in transit.
 */
#define UB_DIR_NONE     0
#define UB_DIR_READ     1
#define UB_DIR_ILLEGAL2 2
#define UB_DIR_WRITE    3

#define UB_DIR_CHAR(c)  (((c)==UB_DIR_WRITE)? 'w': \
                         (((c)==UB_DIR_READ)? 'r': 'n'))
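
/* For example, UB_DIR_CHAR(UB_DIR_WRITE) is 'w'; see the retry printk below. */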

enum ub_scsi_cmd_state {
        UB_CMDST_INIT,                  /* Initial state */
        UB_CMDST_CMD,                   /* Command submitted */
        UB_CMDST_DATA,                  /* Data phase */
        UB_CMDST_CLR2STS,               /* Clearing before requesting status */
        UB_CMDST_STAT,                  /* Status phase */
        UB_CMDST_CLEAR,                 /* Clearing a stall (halt, actually) */
        UB_CMDST_CLRRS,                 /* Clearing before retrying status */
        UB_CMDST_SENSE,                 /* Sending Request Sense */
        UB_CMDST_DONE                   /* Final state */
};

struct ub_scsi_cmd {
        unsigned char cdb[UB_MAX_CDB_SIZE];
        unsigned char cdb_len;

        unsigned char dir;              /* 0 - none, 1 - read, 3 - write. */
        enum ub_scsi_cmd_state state;
        unsigned int tag;
        struct ub_scsi_cmd *next;

        int error;                      /* Return code - valid upon done */
        unsigned int act_len;           /* Return size */
        unsigned char key, asc, ascq;   /* May be valid if error==-EIO */

        int stat_count;                 /* Retries getting status. */
        unsigned int timeo;             /* jiffies until rq->timeout changes */

        unsigned int len;               /* Requested length */
        unsigned int current_sg;
        unsigned int nsg;               /* sgv[nsg] */
        struct scatterlist sgv[UB_MAX_REQ_SG];

        struct ub_lun *lun;
        void (*done)(struct ub_dev *, struct ub_scsi_cmd *);
        void *back;
};

struct ub_request {
        struct request *rq;
        unsigned int current_try;
        unsigned int nsg;               /* sgv[nsg] */
        struct scatterlist sgv[UB_MAX_REQ_SG];
};

/*
 */
struct ub_capacity {
        unsigned long nsec;             /* Linux size - 512 byte sectors */
        unsigned int bsize;             /* Linux hardsect_size */
        unsigned int bshift;            /* Shift between 512 and hard sects */
};

/*
 * This is a direct take-off from linux/include/completion.h
 * The difference is that I do not wait on this thing, just poll.
 * When I want to wait (ub_probe), I just use the stock completion.
 *
 * Note that INIT_COMPLETION takes no lock. It is correct. But why
 * in the bloody hell that thing takes struct instead of pointer to struct
 * is quite beyond me. I just copied it from the stock completion.
 */
struct ub_completion {
        unsigned int done;
        spinlock_t lock;
};

static inline void ub_init_completion(struct ub_completion *x)
{
        x->done = 0;
        spin_lock_init(&x->lock);
}

#define UB_INIT_COMPLETION(x)   ((x).done = 0)

static void ub_complete(struct ub_completion *x)
{
        unsigned long flags;

        spin_lock_irqsave(&x->lock, flags);
        x->done++;
        spin_unlock_irqrestore(&x->lock, flags);
}

static int ub_is_completed(struct ub_completion *x)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&x->lock, flags);
        ret = x->done;
        spin_unlock_irqrestore(&x->lock, flags);
        return ret;
}
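
/*
 * The typical polling pattern, as used in ub_scsi_dispatch() below:
 *
 *      UB_INIT_COMPLETION(sc->work_done);
 *      ...submit an URB whose completion calls ub_complete()...
 *      if (!ub_is_completed(&sc->work_done))
 *              break;          (revisited on the next tasklet run)
 */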

/*
 */
struct ub_scsi_cmd_queue {
        int qlen, qmax;
        struct ub_scsi_cmd *head, *tail;
};

/*
 * The block device instance (one per LUN).
 */
struct ub_lun {
        struct ub_dev *udev;
        struct list_head link;
        struct gendisk *disk;
        int id;                         /* Host index */
        int num;                        /* LUN number */
        char name[16];

        int changed;                    /* Media was changed */
        int removable;
        int readonly;

        struct ub_request urq;

        /* Use Ingo's mempool if or when we have more than one command. */
        /*
         * Currently we never need more than one command for the whole device.
         * However, giving every LUN a command is a cheap and automatic way
         * to enforce fairness between them.
         */
        int cmda[1];
        struct ub_scsi_cmd cmdv[1];

        struct ub_capacity capacity;
};

/*
 * The USB device instance.
 */
struct ub_dev {
        spinlock_t *lock;
        atomic_t poison;                /* The USB device is disconnected */
        int openc;                      /* protected by ub_lock! */
                                        /* kref is too implicit for our taste */
        int reset;                      /* Reset is running */
        int bad_resid;
        unsigned int tagcnt;
        char name[12];
        struct usb_device *dev;
        struct usb_interface *intf;

        struct list_head luns;

        unsigned int send_bulk_pipe;    /* cached pipe values */
        unsigned int recv_bulk_pipe;
        unsigned int send_ctrl_pipe;
        unsigned int recv_ctrl_pipe;

        struct tasklet_struct tasklet;

        struct ub_scsi_cmd_queue cmd_queue;
        struct ub_scsi_cmd top_rqs_cmd; /* REQUEST SENSE */
        unsigned char top_sense[UB_SENSE_SIZE];

        struct ub_completion work_done;
        struct urb work_urb;
        struct timer_list work_timer;
        int last_pipe;                  /* What might need clearing */
        __le32 signature;               /* Learned signature */
        struct bulk_cb_wrap work_bcb;
        struct bulk_cs_wrap work_bcs;
        struct usb_ctrlrequest work_cr;

        struct work_struct reset_work;
        wait_queue_head_t reset_wait;

        int sg_stat[6];
};

/*
 */
static void ub_cleanup(struct ub_dev *sc);
static int ub_request_fn_1(struct ub_lun *lun, struct request *rq);
static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq);
static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq);
static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_end_rq(struct request *rq, unsigned int status,
    unsigned int cmd_len);
static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_request *urq, struct ub_scsi_cmd *cmd);
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_urb_complete(struct urb *urb);
static void ub_scsi_action(unsigned long _dev);
static void ub_scsi_dispatch(struct ub_dev *sc);
static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc);
static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
    int stalled_pipe);
static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
static void ub_reset_enter(struct ub_dev *sc, int try);
static void ub_reset_task(struct work_struct *work);
static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_capacity *ret);
static int ub_sync_reset(struct ub_dev *sc);
static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe);
static int ub_probe_lun(struct ub_dev *sc, int lnum);

/*
 */
#ifdef CONFIG_USB_LIBUSUAL

#define ub_usb_ids  storage_usb_ids
#else

static struct usb_device_id ub_usb_ids[] = {
        { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) },
        { }
};

MODULE_DEVICE_TABLE(usb, ub_usb_ids);
#endif /* CONFIG_USB_LIBUSUAL */

/*
 * Find me a way to identify "next free minor" for add_disk(),
 * and the array disappears the next day. However, the number of
 * hosts has something to do with the naming and /proc/partitions.
 * This has to be thought out in detail before changing.
 * If UB_MAX_HOSTS was 1000, we'd use a bitmap. Or a better data structure.
 */
#define UB_MAX_HOSTS  26
static char ub_hostv[UB_MAX_HOSTS];

#define UB_QLOCK_NUM 5
static spinlock_t ub_qlockv[UB_QLOCK_NUM];
static int ub_qlock_next = 0;

static DEFINE_SPINLOCK(ub_lock);        /* Locks globals and ->openc */

/*
 * The id allocator.
 *
 * This also stores the host for indexing by minor, which is somewhat dirty.
 */
static int ub_id_get(void)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ub_lock, flags);
        for (i = 0; i < UB_MAX_HOSTS; i++) {
                if (ub_hostv[i] == 0) {
                        ub_hostv[i] = 1;
                        spin_unlock_irqrestore(&ub_lock, flags);
                        return i;
                }
        }
        spin_unlock_irqrestore(&ub_lock, flags);
        return -1;
}

static void ub_id_put(int id)
{
        unsigned long flags;

        if (id < 0 || id >= UB_MAX_HOSTS) {
                printk(KERN_ERR DRV_NAME ": bad host ID %d\n", id);
                return;
        }

        spin_lock_irqsave(&ub_lock, flags);
        if (ub_hostv[id] == 0) {
                spin_unlock_irqrestore(&ub_lock, flags);
                printk(KERN_ERR DRV_NAME ": freeing free host ID %d\n", id);
                return;
        }
        ub_hostv[id] = 0;
        spin_unlock_irqrestore(&ub_lock, flags);
}
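
/*
 * A usage sketch (the probe path pairs these): a host id is taken
 * with ub_id_get() when a LUN is created and returned with
 * ub_id_put(lun->id) in ub_cleanup() below, or on a probe error.
 */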

/*
 * This is necessitated by the fact that blk_cleanup_queue does not
 * necessarily destroy the queue. Instead, it may merely decrease q->refcnt.
 * Since our blk_init_queue() passes a spinlock common with ub_dev,
 * we have life time issues when ub_cleanup frees ub_dev.
 */
static spinlock_t *ub_next_lock(void)
{
        unsigned long flags;
        spinlock_t *ret;

        spin_lock_irqsave(&ub_lock, flags);
        ret = &ub_qlockv[ub_qlock_next];
        ub_qlock_next = (ub_qlock_next + 1) % UB_QLOCK_NUM;
        spin_unlock_irqrestore(&ub_lock, flags);
        return ret;
}

/*
 * Downcount for deallocation. This rides on two assumptions:
 * - once something is poisoned, its refcount cannot grow
 * - opens cannot happen at this time (del_gendisk was done)
 * If the above is true, we can drop the lock, which we need for
 * blk_cleanup_queue(): the silly thing may attempt to sleep.
 * [Actually, it never needs to sleep for us, but it calls might_sleep()]
 */
static void ub_put(struct ub_dev *sc)
{
        unsigned long flags;

        spin_lock_irqsave(&ub_lock, flags);
        --sc->openc;
        if (sc->openc == 0 && atomic_read(&sc->poison)) {
                spin_unlock_irqrestore(&ub_lock, flags);
                ub_cleanup(sc);
        } else {
                spin_unlock_irqrestore(&ub_lock, flags);
        }
}

/*
 * Final cleanup and deallocation.
 */
static void ub_cleanup(struct ub_dev *sc)
{
        struct list_head *p;
        struct ub_lun *lun;
        struct request_queue *q;

        while (!list_empty(&sc->luns)) {
                p = sc->luns.next;
                lun = list_entry(p, struct ub_lun, link);
                list_del(p);

                /* I don't think queue can be NULL. But... Stolen from sx8.c */
                if ((q = lun->disk->queue) != NULL)
                        blk_cleanup_queue(q);
                /*
                 * If we zero disk->private_data BEFORE put_disk, we have
                 * to check for NULL all over the place in open, release,
                 * check_media and revalidate, because the block level
                 * semaphore is well inside the put_disk.
                 * But we cannot zero after the call, because *disk is gone.
                 * The sd.c is blatantly racy in this area.
                 */
                /* disk->private_data = NULL; */
                put_disk(lun->disk);
                lun->disk = NULL;

                ub_id_put(lun->id);
                kfree(lun);
        }

        usb_set_intfdata(sc->intf, NULL);
        usb_put_intf(sc->intf);
        usb_put_dev(sc->dev);
        kfree(sc);
}

/*
 * The "command allocator".
 */
static struct ub_scsi_cmd *ub_get_cmd(struct ub_lun *lun)
{
        struct ub_scsi_cmd *ret;

        if (lun->cmda[0])
                return NULL;
        ret = &lun->cmdv[0];
        lun->cmda[0] = 1;
        return ret;
}

static void ub_put_cmd(struct ub_lun *lun, struct ub_scsi_cmd *cmd)
{
        if (cmd != &lun->cmdv[0]) {
                printk(KERN_WARNING "%s: releasing a foreign cmd %p\n",
                    lun->name, cmd);
                return;
        }
        if (!lun->cmda[0]) {
                printk(KERN_WARNING "%s: releasing a free cmd\n", lun->name);
                return;
        }
        lun->cmda[0] = 0;
}

/*
 * The command queue.
 */
static void ub_cmdq_add(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct ub_scsi_cmd_queue *t = &sc->cmd_queue;

        if (t->qlen++ == 0) {
                t->head = cmd;
                t->tail = cmd;
        } else {
                t->tail->next = cmd;
                t->tail = cmd;
        }

        if (t->qlen > t->qmax)
                t->qmax = t->qlen;
}

static void ub_cmdq_insert(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct ub_scsi_cmd_queue *t = &sc->cmd_queue;

        if (t->qlen++ == 0) {
                t->head = cmd;
                t->tail = cmd;
        } else {
                cmd->next = t->head;
                t->head = cmd;
        }

        if (t->qlen > t->qmax)
                t->qmax = t->qlen;
}

static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc)
{
        struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
        struct ub_scsi_cmd *cmd;

        if (t->qlen == 0)
                return NULL;
        if (--t->qlen == 0)
                t->tail = NULL;
        cmd = t->head;
        t->head = cmd->next;
        cmd->next = NULL;
        return cmd;
}

#define ub_cmdq_peek(sc)  ((sc)->cmd_queue.head)
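
/*
 * How the queue is used: ub_submit_scsi() and ub_rw_cmd_retry() feed
 * it with ub_cmdq_add(); ub_state_sense() jumps the line with
 * ub_cmdq_insert() so that REQUEST SENSE runs first; ub_scsi_dispatch()
 * consumes with ub_cmdq_peek()/ub_cmdq_pop() under sc->lock.
 */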

/*
 * The request function is our main entry point
 */

static void ub_request_fn(struct request_queue *q)
{
        struct ub_lun *lun = q->queuedata;
        struct request *rq;

        while ((rq = elv_next_request(q)) != NULL) {
                if (ub_request_fn_1(lun, rq) != 0) {
                        blk_stop_queue(q);
                        break;
                }
        }
}

static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
{
        struct ub_dev *sc = lun->udev;
        struct ub_scsi_cmd *cmd;
        struct ub_request *urq;
        int n_elem;

        if (atomic_read(&sc->poison)) {
                blkdev_dequeue_request(rq);
                ub_end_rq(rq, DID_NO_CONNECT << 16, blk_rq_bytes(rq));
                return 0;
        }

        if (lun->changed && !blk_pc_request(rq)) {
                blkdev_dequeue_request(rq);
                ub_end_rq(rq, SAM_STAT_CHECK_CONDITION, blk_rq_bytes(rq));
                return 0;
        }

        if (lun->urq.rq != NULL)
                return -1;
        if ((cmd = ub_get_cmd(lun)) == NULL)
                return -1;
        memset(cmd, 0, sizeof(struct ub_scsi_cmd));

        blkdev_dequeue_request(rq);

        urq = &lun->urq;
        memset(urq, 0, sizeof(struct ub_request));
        urq->rq = rq;

        /*
         * get scatterlist from block layer
         */
        sg_init_table(&urq->sgv[0], UB_MAX_REQ_SG);
        n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]);
        if (n_elem < 0) {
                /* Impossible, because blk_rq_map_sg should not hit ENOMEM. */
                printk(KERN_INFO "%s: failed request map (%d)\n",
                    lun->name, n_elem);
                goto drop;
        }
        if (n_elem > UB_MAX_REQ_SG) {   /* Paranoia */
                printk(KERN_WARNING "%s: request with %d segments\n",
                    lun->name, n_elem);
                goto drop;
        }
        urq->nsg = n_elem;
        sc->sg_stat[n_elem < 5 ? n_elem : 5]++;

        if (blk_pc_request(rq)) {
                ub_cmd_build_packet(sc, lun, cmd, urq);
        } else {
                ub_cmd_build_block(sc, lun, cmd, urq);
        }
        cmd->state = UB_CMDST_INIT;
        cmd->lun = lun;
        cmd->done = ub_rw_cmd_done;
        cmd->back = urq;

        cmd->tag = sc->tagcnt++;
        if (ub_submit_scsi(sc, cmd) != 0)
                goto drop;

        return 0;

drop:
        ub_put_cmd(lun, cmd);
        ub_end_rq(rq, DID_ERROR << 16, blk_rq_bytes(rq));
        return 0;
}

static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq)
{
        struct request *rq = urq->rq;
        unsigned int block, nblks;

        if (rq_data_dir(rq) == WRITE)
                cmd->dir = UB_DIR_WRITE;
        else
                cmd->dir = UB_DIR_READ;

        cmd->nsg = urq->nsg;
        memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);

        /*
         * build the command
         *
         * The call to blk_queue_hardsect_size() guarantees that request
         * is aligned, but it is given in terms of 512 byte units, always.
         */
        block = rq->sector >> lun->capacity.bshift;
        nblks = rq->nr_sectors >> lun->capacity.bshift;

        cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
        /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
        cmd->cdb[2] = block >> 24;
        cmd->cdb[3] = block >> 16;
        cmd->cdb[4] = block >> 8;
        cmd->cdb[5] = block;
        cmd->cdb[7] = nblks >> 8;
        cmd->cdb[8] = nblks;
        cmd->cdb_len = 10;

        cmd->len = rq->nr_sectors * 512;
}
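
/*
 * A worked example, assuming a LUN with 2048-byte hardware sectors:
 * capacity.bsize = 2048 gives capacity.bshift = 2, so a request at
 * 512-byte sector 100 for 8 sectors becomes block = 100 >> 2 = 25
 * and nblks = 8 >> 2 = 2 in the CDB, while cmd->len = 8 * 512 = 4096.
 */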

static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_scsi_cmd *cmd, struct ub_request *urq)
{
        struct request *rq = urq->rq;

        if (rq->data_len == 0) {
                cmd->dir = UB_DIR_NONE;
        } else {
                if (rq_data_dir(rq) == WRITE)
                        cmd->dir = UB_DIR_WRITE;
                else
                        cmd->dir = UB_DIR_READ;
        }

        cmd->nsg = urq->nsg;
        memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg);

        memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
        cmd->cdb_len = rq->cmd_len;

        cmd->len = rq->data_len;

        /*
         * To reapply this to every URB is not as incorrect as it looks.
         * In return, we avoid any complicated tracking calculations.
         */
        cmd->timeo = rq->timeout;
}

static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct ub_lun *lun = cmd->lun;
        struct ub_request *urq = cmd->back;
        struct request *rq;
        unsigned int scsi_status;
        unsigned int cmd_len;

        rq = urq->rq;

        if (cmd->error == 0) {
                if (blk_pc_request(rq)) {
                        if (cmd->act_len >= rq->data_len)
                                rq->data_len = 0;
                        else
                                rq->data_len -= cmd->act_len;
                        scsi_status = 0;
                } else {
                        if (cmd->act_len != cmd->len) {
                                scsi_status = SAM_STAT_CHECK_CONDITION;
                        } else {
                                scsi_status = 0;
                        }
                }
        } else {
                if (blk_pc_request(rq)) {
                        /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
                        memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
                        rq->sense_len = UB_SENSE_SIZE;
                        if (sc->top_sense[0] != 0)
                                scsi_status = SAM_STAT_CHECK_CONDITION;
                        else
                                scsi_status = DID_ERROR << 16;
                } else {
                        if (cmd->error == -EIO &&
                            (cmd->key == 0 ||
                             cmd->key == MEDIUM_ERROR ||
                             cmd->key == UNIT_ATTENTION)) {
                                if (ub_rw_cmd_retry(sc, lun, urq, cmd) == 0)
                                        return;
                        }
                        scsi_status = SAM_STAT_CHECK_CONDITION;
                }
        }

        urq->rq = NULL;

        cmd_len = cmd->len;
        ub_put_cmd(lun, cmd);
        ub_end_rq(rq, scsi_status, cmd_len);
        blk_start_queue(lun->disk->queue);
}

static void ub_end_rq(struct request *rq, unsigned int scsi_status,
    unsigned int cmd_len)
{
        int error;
        long rqlen;

        if (scsi_status == 0) {
                error = 0;
        } else {
                error = -EIO;
                rq->errors = scsi_status;
        }
        rqlen = blk_rq_bytes(rq);       /* Oddly enough, this is the residue. */
        if (__blk_end_request(rq, error, cmd_len)) {
                printk(KERN_WARNING DRV_NAME
                    ": __blk_end_request blew, %s-cmd total %u rqlen %ld\n",
                    blk_pc_request(rq)? "pc": "fs", cmd_len, rqlen);
        }
}

static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun,
    struct ub_request *urq, struct ub_scsi_cmd *cmd)
{

        if (atomic_read(&sc->poison))
                return -ENXIO;

        ub_reset_enter(sc, urq->current_try);

        if (urq->current_try >= 3)
                return -EIO;
        urq->current_try++;

        /* Remove this if anyone complains of flooding. */
        printk(KERN_DEBUG "%s: dir %c len/act %d/%d "
            "[sense %x %02x %02x] retry %d\n",
            sc->name, UB_DIR_CHAR(cmd->dir), cmd->len, cmd->act_len,
            cmd->key, cmd->asc, cmd->ascq, urq->current_try);

        memset(cmd, 0, sizeof(struct ub_scsi_cmd));
        ub_cmd_build_block(sc, lun, cmd, urq);

        cmd->state = UB_CMDST_INIT;
        cmd->lun = lun;
        cmd->done = ub_rw_cmd_done;
        cmd->back = urq;

        cmd->tag = sc->tagcnt++;

#if 0 /* Wasteful */
        return ub_submit_scsi(sc, cmd);
#else
        ub_cmdq_add(sc, cmd);
        return 0;
#endif
}

/*
 * Submit a regular SCSI operation (not an auto-sense).
 *
 * The Iron Law of Good Submit Routine is:
 * Zero return - callback is done, Nonzero return - callback is not done.
 * No exceptions.
 *
 * Host is assumed locked.
 */
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{

        if (cmd->state != UB_CMDST_INIT ||
            (cmd->dir != UB_DIR_NONE && cmd->len == 0)) {
                return -EINVAL;
        }

        ub_cmdq_add(sc, cmd);
        /*
         * We can call ub_scsi_dispatch(sc) right away here, but it's a little
         * safer to jump to a tasklet, in case upper layers do something silly.
         */
        tasklet_schedule(&sc->tasklet);
        return 0;
}

/*
 * Submit the first URB for the queued command.
 * This function does not deal with queueing in any way.
 */
static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct bulk_cb_wrap *bcb;
        int rc;

        bcb = &sc->work_bcb;

        /*
         * ``If the allocation length is eighteen or greater, and a device
         * server returns less than eighteen bytes of data, the application
         * client should assume that the bytes not transferred would have been
         * zeroes had the device server returned those bytes.''
         *
         * We zero sense for all commands so that when a packet request
         * fails it does not return a stale sense.
         */
        memset(&sc->top_sense, 0, UB_SENSE_SIZE);

        /* set up the command wrapper */
        bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
        bcb->Tag = cmd->tag;            /* Endianness is not important */
        bcb->DataTransferLength = cpu_to_le32(cmd->len);
        bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0;
        bcb->Lun = (cmd->lun != NULL) ? cmd->lun->num : 0;
        bcb->Length = cmd->cdb_len;

        /* copy the command payload */
        memcpy(bcb->CDB, cmd->cdb, UB_MAX_CDB_SIZE);

        UB_INIT_COMPLETION(sc->work_done);

        sc->last_pipe = sc->send_bulk_pipe;
        usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
            bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);

        if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
                /* XXX Clear stalls */
                ub_complete(&sc->work_done);
                return rc;
        }

        sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
        add_timer(&sc->work_timer);

        cmd->state = UB_CMDST_CMD;
        return 0;
}

/*
 * Timeout handler.
 */
static void ub_urb_timeout(unsigned long arg)
{
        struct ub_dev *sc = (struct ub_dev *) arg;
        unsigned long flags;

        spin_lock_irqsave(sc->lock, flags);
        if (!ub_is_completed(&sc->work_done))
                usb_unlink_urb(&sc->work_urb);
        spin_unlock_irqrestore(sc->lock, flags);
}

/*
 * Completion routine for the work URB.
 *
 * This can be called directly from usb_submit_urb (while we have
 * the sc->lock taken) and from an interrupt (while we do NOT have
 * the sc->lock taken). Therefore, bounce this off to a tasklet.
 */
static void ub_urb_complete(struct urb *urb)
{
        struct ub_dev *sc = urb->context;

        ub_complete(&sc->work_done);
        tasklet_schedule(&sc->tasklet);
}

static void ub_scsi_action(unsigned long _dev)
{
        struct ub_dev *sc = (struct ub_dev *) _dev;
        unsigned long flags;

        spin_lock_irqsave(sc->lock, flags);
        ub_scsi_dispatch(sc);
        spin_unlock_irqrestore(sc->lock, flags);
}

static void ub_scsi_dispatch(struct ub_dev *sc)
{
        struct ub_scsi_cmd *cmd;
        int rc;

        while (!sc->reset && (cmd = ub_cmdq_peek(sc)) != NULL) {
                if (cmd->state == UB_CMDST_DONE) {
                        ub_cmdq_pop(sc);
                        (*cmd->done)(sc, cmd);
                } else if (cmd->state == UB_CMDST_INIT) {
                        if ((rc = ub_scsi_cmd_start(sc, cmd)) == 0)
                                break;
                        cmd->error = rc;
                        cmd->state = UB_CMDST_DONE;
                } else {
                        if (!ub_is_completed(&sc->work_done))
                                break;
                        del_timer(&sc->work_timer);
                        ub_scsi_urb_compl(sc, cmd);
                }
        }
}

static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct urb *urb = &sc->work_urb;
        struct bulk_cs_wrap *bcs;
        int len;
        int rc;

        if (atomic_read(&sc->poison)) {
                ub_state_done(sc, cmd, -ENODEV);
                return;
        }

        if (cmd->state == UB_CMDST_CLEAR) {
                if (urb->status == -EPIPE) {
                        /*
                         * STALL while clearing STALL.
                         * The control pipe clears itself - nothing to do.
                         */
                        printk(KERN_NOTICE "%s: stall on control pipe\n",
                            sc->name);
                        goto Bad_End;
                }

                /*
                 * We ignore the result for the halt clear.
                 */

                /* reset the endpoint toggle */
                usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
                        usb_pipeout(sc->last_pipe), 0);

                ub_state_sense(sc, cmd);

        } else if (cmd->state == UB_CMDST_CLR2STS) {
                if (urb->status == -EPIPE) {
                        printk(KERN_NOTICE "%s: stall on control pipe\n",
                            sc->name);
                        goto Bad_End;
                }

                /*
                 * We ignore the result for the halt clear.
                 */

                /* reset the endpoint toggle */
                usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
                        usb_pipeout(sc->last_pipe), 0);

                ub_state_stat(sc, cmd);

        } else if (cmd->state == UB_CMDST_CLRRS) {
                if (urb->status == -EPIPE) {
                        printk(KERN_NOTICE "%s: stall on control pipe\n",
                            sc->name);
                        goto Bad_End;
                }

                /*
                 * We ignore the result for the halt clear.
                 */

                /* reset the endpoint toggle */
                usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
                        usb_pipeout(sc->last_pipe), 0);

                ub_state_stat_counted(sc, cmd);

        } else if (cmd->state == UB_CMDST_CMD) {
                switch (urb->status) {
                case 0:
                        break;
                case -EOVERFLOW:
                        goto Bad_End;
                case -EPIPE:
                        rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
                        if (rc != 0) {
                                printk(KERN_NOTICE "%s: "
                                    "unable to submit clear (%d)\n",
                                    sc->name, rc);
                                /*
                                 * This is typically ENOMEM or some other such shit.
                                 * Retrying is pointless. Just do Bad End on it...
                                 */
                                ub_state_done(sc, cmd, rc);
                                return;
                        }
                        cmd->state = UB_CMDST_CLEAR;
                        return;
                case -ESHUTDOWN:        /* unplug */
                case -EILSEQ:           /* unplug timeout on uhci */
                        ub_state_done(sc, cmd, -ENODEV);
                        return;
                default:
                        goto Bad_End;
                }
                if (urb->actual_length != US_BULK_CB_WRAP_LEN) {
                        goto Bad_End;
                }

                if (cmd->dir == UB_DIR_NONE || cmd->nsg < 1) {
                        ub_state_stat(sc, cmd);
                        return;
                }

                // udelay(125);         // usb-storage has this
                ub_data_start(sc, cmd);

        } else if (cmd->state == UB_CMDST_DATA) {
                if (urb->status == -EPIPE) {
                        rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
                        if (rc != 0) {
                                printk(KERN_NOTICE "%s: "
                                    "unable to submit clear (%d)\n",
                                    sc->name, rc);
                                ub_state_done(sc, cmd, rc);
                                return;
                        }
                        cmd->state = UB_CMDST_CLR2STS;
                        return;
                }
                if (urb->status == -EOVERFLOW) {
                        /*
                         * A babble? Failure, but we must transfer CSW now.
                         */
                        cmd->error = -EOVERFLOW;        /* A cheap trick... */
                        ub_state_stat(sc, cmd);
                        return;
                }

                if (cmd->dir == UB_DIR_WRITE) {
                        /*
                         * Do not continue writes in case of a failure.
                         * Doing so would cause sectors to be mixed up,
                         * which is worse than sectors lost.
                         *
                         * We must try to read the CSW, or many devices
                         * get confused.
                         */
                        len = urb->actual_length;
                        if (urb->status != 0 ||
                            len != cmd->sgv[cmd->current_sg].length) {
                                cmd->act_len += len;

                                cmd->error = -EIO;
                                ub_state_stat(sc, cmd);
                                return;
                        }

                } else {
                        /*
                         * If an error occurs on read, we record it, and
                         * continue to fetch data in order to avoid bubble.
                         *
                         * As a small shortcut, we stop if we detect that
                         * a CSW mixed into data.
                         */
                        if (urb->status != 0)
                                cmd->error = -EIO;

                        len = urb->actual_length;
                        if (urb->status != 0 ||
                            len != cmd->sgv[cmd->current_sg].length) {
                                if ((len & 0x1FF) == US_BULK_CS_WRAP_LEN)
                                        goto Bad_End;
                        }
                }

                cmd->act_len += urb->actual_length;

                if (++cmd->current_sg < cmd->nsg) {
                        ub_data_start(sc, cmd);
                        return;
                }
                ub_state_stat(sc, cmd);

        } else if (cmd->state == UB_CMDST_STAT) {
                if (urb->status == -EPIPE) {
                        rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
                        if (rc != 0) {
                                printk(KERN_NOTICE "%s: "
                                    "unable to submit clear (%d)\n",
                                    sc->name, rc);
                                ub_state_done(sc, cmd, rc);
                                return;
                        }

                        /*
                         * Having a stall when getting CSW is an error, so
                         * make sure upper levels are not oblivious to it.
                         */
                        cmd->error = -EIO;              /* A cheap trick... */

                        cmd->state = UB_CMDST_CLRRS;
                        return;
                }

                /* Catch everything, including -EOVERFLOW and other nasties. */
                if (urb->status != 0)
                        goto Bad_End;

                if (urb->actual_length == 0) {
                        ub_state_stat_counted(sc, cmd);
                        return;
                }

                /*
                 * Check the returned Bulk protocol status.
                 * The status block has to be validated first.
                 */

                bcs = &sc->work_bcs;

                if (sc->signature == cpu_to_le32(0)) {
                        /*
                         * This is the first reply, so do not perform the check.
                         * Instead, remember the signature the device uses
                         * for future checks. But do not allow a nul.
                         */
                        sc->signature = bcs->Signature;
                        if (sc->signature == cpu_to_le32(0)) {
                                ub_state_stat_counted(sc, cmd);
                                return;
                        }
                } else {
                        if (bcs->Signature != sc->signature) {
                                ub_state_stat_counted(sc, cmd);
                                return;
                        }
                }

                if (bcs->Tag != cmd->tag) {
                        /*
                         * This usually happens when we disagree with the
                         * device's microcode about something. For instance,
                         * a few of them throw this after timeouts. They buffer
                         * commands and reply at commands we timed out before.
                         * Without flushing these replies we loop forever.
                         */
                        ub_state_stat_counted(sc, cmd);
                        return;
                }

                if (!sc->bad_resid) {
                        len = le32_to_cpu(bcs->Residue);
                        if (len != cmd->len - cmd->act_len) {
                                /*
                                 * Only start ignoring if this cmd ended well.
                                 */
                                if (cmd->len == cmd->act_len) {
                                        printk(KERN_NOTICE "%s: "
                                            "bad residual %d of %d, ignoring\n",
                                            sc->name, len, cmd->len);
                                        sc->bad_resid = 1;
                                }
                        }
                }

                switch (bcs->Status) {
                case US_BULK_STAT_OK:
                        break;
                case US_BULK_STAT_FAIL:
                        ub_state_sense(sc, cmd);
                        return;
                case US_BULK_STAT_PHASE:
                        goto Bad_End;
                default:
                        printk(KERN_INFO "%s: unknown CSW status 0x%x\n",
                            sc->name, bcs->Status);
                        ub_state_done(sc, cmd, -EINVAL);
                        return;
                }

                /* Not zeroing error to preserve a babble indicator */
                if (cmd->error != 0) {
                        ub_state_sense(sc, cmd);
                        return;
                }
                cmd->state = UB_CMDST_DONE;
                ub_cmdq_pop(sc);
                (*cmd->done)(sc, cmd);

        } else if (cmd->state == UB_CMDST_SENSE) {
                ub_state_done(sc, cmd, -EIO);

        } else {
                printk(KERN_WARNING "%s: wrong command state %d\n",
                    sc->name, cmd->state);
                ub_state_done(sc, cmd, -EINVAL);
                return;
        }
        return;

Bad_End: /* Little Excel is dead */
        ub_state_done(sc, cmd, -EIO);
}

/*
 * Factorization helper for the command state machine:
 * Initiate a data segment transfer.
 */
static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct scatterlist *sg = &cmd->sgv[cmd->current_sg];
        int pipe;
        int rc;

        UB_INIT_COMPLETION(sc->work_done);

        if (cmd->dir == UB_DIR_READ)
                pipe = sc->recv_bulk_pipe;
        else
                pipe = sc->send_bulk_pipe;
        sc->last_pipe = pipe;
        usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, sg_virt(sg),
            sg->length, ub_urb_complete, sc);

        if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
                /* XXX Clear stalls */
                ub_complete(&sc->work_done);
                ub_state_done(sc, cmd, rc);
                return;
        }

        if (cmd->timeo)
                sc->work_timer.expires = jiffies + cmd->timeo;
        else
                sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT;
        add_timer(&sc->work_timer);

        cmd->state = UB_CMDST_DATA;
}

/*
 * Factorization helper for the command state machine:
 * Finish the command.
 */
static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc)
{

        cmd->error = rc;
        cmd->state = UB_CMDST_DONE;
        ub_cmdq_pop(sc);
        (*cmd->done)(sc, cmd);
}

/*
 * Factorization helper for the command state machine:
 * Submit a CSW read.
 */
static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        int rc;

        UB_INIT_COMPLETION(sc->work_done);

        sc->last_pipe = sc->recv_bulk_pipe;
        usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe,
            &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc);

        if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
                /* XXX Clear stalls */
                ub_complete(&sc->work_done);
                ub_state_done(sc, cmd, rc);
                return -1;
        }

        if (cmd->timeo)
                sc->work_timer.expires = jiffies + cmd->timeo;
        else
                sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT;
        add_timer(&sc->work_timer);
        return 0;
}

/*
 * Factorization helper for the command state machine:
 * Submit a CSW read and go to STAT state.
 */
static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{

        if (__ub_state_stat(sc, cmd) != 0)
                return;

        cmd->stat_count = 0;
        cmd->state = UB_CMDST_STAT;
}

/*
 * Factorization helper for the command state machine:
 * Submit a CSW read and go to STAT state with counter (along [C] path).
 */
static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{

        if (++cmd->stat_count >= 4) {
                ub_state_sense(sc, cmd);
                return;
        }

        if (__ub_state_stat(sc, cmd) != 0)
                return;

        cmd->state = UB_CMDST_STAT;
}

/*
 * Factorization helper for the command state machine:
 * Submit a REQUEST SENSE and go to SENSE state.
 */
static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
        struct ub_scsi_cmd *scmd;
        struct scatterlist *sg;
        int rc;

        if (cmd->cdb[0] == REQUEST_SENSE) {
                rc = -EPIPE;
                goto error;
        }

        scmd = &sc->top_rqs_cmd;
        memset(scmd, 0, sizeof(struct ub_scsi_cmd));
        scmd->cdb[0] = REQUEST_SENSE;
        scmd->cdb[4] = UB_SENSE_SIZE;
        scmd->cdb_len = 6;
        scmd->dir = UB_DIR_READ;
        scmd->state = UB_CMDST_INIT;
        scmd->nsg = 1;
        sg = &scmd->sgv[0];
        sg_init_table(sg, UB_MAX_REQ_SG);
        sg_set_page(sg, virt_to_page(sc->top_sense), UB_SENSE_SIZE,
            (unsigned long)sc->top_sense & (PAGE_SIZE-1));
        scmd->len = UB_SENSE_SIZE;
        scmd->lun = cmd->lun;
        scmd->done = ub_top_sense_done;
        scmd->back = cmd;

        scmd->tag = sc->tagcnt++;

        cmd->state = UB_CMDST_SENSE;

        ub_cmdq_insert(sc, scmd);
        return;

error:
        ub_state_done(sc, cmd, rc);
}

/*
 * A helper for the command's state machine:
 * Submit a stall clear.
 */
static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
    int stalled_pipe)
{
        int endp;
        struct usb_ctrlrequest *cr;
        int rc;

        endp = usb_pipeendpoint(stalled_pipe);
        if (usb_pipein(stalled_pipe))
                endp |= USB_DIR_IN;

        cr = &sc->work_cr;
        cr->bRequestType = USB_RECIP_ENDPOINT;
        cr->bRequest = USB_REQ_CLEAR_FEATURE;
        cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
        cr->wIndex = cpu_to_le16(endp);
        cr->wLength = cpu_to_le16(0);

        UB_INIT_COMPLETION(sc->work_done);

        usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
            (unsigned char*) cr, NULL, 0, ub_urb_complete, sc);

        if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
                ub_complete(&sc->work_done);
                return rc;
        }

        sc->work_timer.expires = jiffies + UB_CTRL_TIMEOUT;
        add_timer(&sc->work_timer);
        return 0;
}
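
/*
 * On the wire the above is the standard CLEAR_FEATURE(ENDPOINT_HALT)
 * setup packet; for example, for a stalled bulk-in endpoint 2:
 *
 *      bmRequestType   0x02    (standard, host-to-device, endpoint)
 *      bRequest        0x01    (USB_REQ_CLEAR_FEATURE)
 *      wValue          0x0000  (USB_ENDPOINT_HALT)
 *      wIndex          0x0082  (endpoint 2 | USB_DIR_IN)
 *      wLength         0x0000
 */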

/*
 */
static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
{
        unsigned char *sense = sc->top_sense;
        struct ub_scsi_cmd *cmd;

        /*
         * Find the command which triggered the unit attention or a check,
         * save the sense into it, and advance its state machine.
         */
        if ((cmd = ub_cmdq_peek(sc)) == NULL) {
                printk(KERN_WARNING "%s: sense done while idle\n", sc->name);
                return;
        }
        if (cmd != scmd->back) {
                printk(KERN_WARNING "%s: "
                    "sense done for wrong command 0x%x\n",
                    sc->name, cmd->tag);
                return;
        }
        if (cmd->state != UB_CMDST_SENSE) {
                printk(KERN_WARNING "%s: sense done with bad cmd state %d\n",
                    sc->name, cmd->state);
                return;
        }

        /*
         * Ignoring scmd->act_len, because the buffer was pre-zeroed.
         */
        cmd->key = sense[2] & 0x0F;
        cmd->asc = sense[12];
        cmd->ascq = sense[13];

        ub_scsi_urb_compl(sc, cmd);
}
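
/*
 * For reference, fixed-format sense as decoded above: byte 2 low
 * nibble is the sense key, bytes 12 and 13 are ASC/ASCQ. A typical
 * "medium not present" reply looks like
 *
 *      70 00 02 00 00 00 00 0a 00 00 00 00 3a 00 00 00 00 00
 *
 * and yields key 0x02 (NOT READY), asc 0x3a, ascq 0x00.
 */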
1549 | ||
2c26c9e6 PZ |
1550 | /* |
1551 | * Reset management | |
2c2e4a2e PZ |
1552 | * XXX Move usb_reset_device to khubd. Hogging kevent is not a good thing. |
1553 | * XXX Make usb_sync_reset asynchronous. | |
2c26c9e6 PZ |
1554 | */ |
1555 | ||
2c2e4a2e | 1556 | static void ub_reset_enter(struct ub_dev *sc, int try) |
2c26c9e6 PZ |
1557 | { |
1558 | ||
1559 | if (sc->reset) { | |
1560 | /* This happens often on multi-LUN devices. */ | |
1561 | return; | |
1562 | } | |
2c2e4a2e | 1563 | sc->reset = try + 1; |
2c26c9e6 PZ |
1564 | |
1565 | #if 0 /* Not needed because the disconnect waits for us. */ | |
1566 | unsigned long flags; | |
1567 | spin_lock_irqsave(&ub_lock, flags); | |
1568 | sc->openc++; | |
1569 | spin_unlock_irqrestore(&ub_lock, flags); | |
1570 | #endif | |
1571 | ||
1572 | #if 0 /* We let them stop themselves. */ | |
2c26c9e6 | 1573 | struct ub_lun *lun; |
a69228de | 1574 | list_for_each_entry(lun, &sc->luns, link) { |
2c26c9e6 PZ |
1575 | blk_stop_queue(lun->disk->queue); |
1576 | } | |
1577 | #endif | |
1578 | ||
1579 | schedule_work(&sc->reset_work); | |
1580 | } | |
1581 | ||
c4028958 | 1582 | static void ub_reset_task(struct work_struct *work) |
2c26c9e6 | 1583 | { |
c4028958 | 1584 | struct ub_dev *sc = container_of(work, struct ub_dev, reset_work); |
2c26c9e6 | 1585 | unsigned long flags; |
2c26c9e6 PZ |
1586 | struct ub_lun *lun; |
1587 | int lkr, rc; | |
1588 | ||
1589 | if (!sc->reset) { | |
1590 | printk(KERN_WARNING "%s: Running reset unrequested\n", | |
1591 | sc->name); | |
1592 | return; | |
1593 | } | |
1594 | ||
1595 | if (atomic_read(&sc->poison)) { | |
b5600339 | 1596 | ; |
2c2e4a2e PZ |
1597 | } else if ((sc->reset & 1) == 0) { |
1598 | ub_sync_reset(sc); | |
1599 | msleep(700); /* usb-storage sleeps 6s (!) */ | |
1600 | ub_probe_clear_stall(sc, sc->recv_bulk_pipe); | |
1601 | ub_probe_clear_stall(sc, sc->send_bulk_pipe); | |
2c26c9e6 | 1602 | } else if (sc->dev->actconfig->desc.bNumInterfaces != 1) { |
b5600339 | 1603 | ; |
2c26c9e6 PZ |
1604 | } else { |
1605 | if ((lkr = usb_lock_device_for_reset(sc->dev, sc->intf)) < 0) { | |
1606 | printk(KERN_NOTICE | |
1607 | "%s: usb_lock_device_for_reset failed (%d)\n", | |
1608 | sc->name, lkr); | |
1609 | } else { | |
1610 | rc = usb_reset_device(sc->dev); | |
1611 | if (rc < 0) { | |
1612 | printk(KERN_NOTICE "%s: " | |
1613 | "usb_lock_device_for_reset failed (%d)\n", | |
1614 | sc->name, rc); | |
1615 | } | |
1616 | ||
1617 | if (lkr) | |
1618 | usb_unlock_device(sc->dev); | |
1619 | } | |
1620 | } | |
1621 | ||
1622 | /* | |
1623 | * In theory, no commands can be running while reset is active, | |
1624 | * so nobody can ask for another reset, and so we do not need any | |
1625 | * queues of resets or anything. We do need a spinlock though, | |
1626 | * to interact with the block layer. |
1627 | */ | |
65b4fe55 | 1628 | spin_lock_irqsave(sc->lock, flags); |
2c26c9e6 PZ |
1629 | sc->reset = 0; |
1630 | tasklet_schedule(&sc->tasklet); | |
a69228de | 1631 | list_for_each_entry(lun, &sc->luns, link) { |
2c26c9e6 PZ |
1632 | blk_start_queue(lun->disk->queue); |
1633 | } | |
1634 | wake_up(&sc->reset_wait); | |
65b4fe55 | 1635 | spin_unlock_irqrestore(sc->lock, flags); |
2c26c9e6 PZ |
1636 | } |
1637 | ||
1da177e4 LT |
1638 | /* |
1639 | * This is called from a process context. | |
1640 | */ | |
f4800078 | 1641 | static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun) |
1da177e4 LT |
1642 | { |
1643 | ||
f4800078 | 1644 | lun->readonly = 0; /* XXX Query this from the device */ |
1da177e4 | 1645 | |
f4800078 PZ |
1646 | lun->capacity.nsec = 0; |
1647 | lun->capacity.bsize = 512; | |
1648 | lun->capacity.bshift = 0; | |
1da177e4 | 1649 | |
f4800078 | 1650 | if (ub_sync_tur(sc, lun) != 0) |
1da177e4 | 1651 | return; /* Not ready */ |
f4800078 | 1652 | lun->changed = 0; |
1da177e4 | 1653 | |
f4800078 | 1654 | if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) { |
1da177e4 LT |
1655 | /* |
1656 | * The retry here means something is wrong, either with the | |
1657 | * device, with the transport, or with our code. | |
1658 | * We keep this because sd.c has retries for capacity. | |
1659 | */ | |
f4800078 PZ |
1660 | if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) { |
1661 | lun->capacity.nsec = 0; | |
1662 | lun->capacity.bsize = 512; | |
1663 | lun->capacity.bshift = 0; | |
1da177e4 LT |
1664 | } |
1665 | } | |
1666 | } | |
1667 | ||
1668 | /* | |
1669 | * The open function. |
1670 | * This is mostly needed to keep refcounting, but also to support | |
1671 | * media checks on removable media drives. | |
1672 | */ | |
1673 | static int ub_bd_open(struct inode *inode, struct file *filp) | |
1674 | { | |
1675 | struct gendisk *disk = inode->i_bdev->bd_disk; | |
41fea55e PZ |
1676 | struct ub_lun *lun = disk->private_data; |
1677 | struct ub_dev *sc = lun->udev; | |
1da177e4 LT |
1678 | unsigned long flags; |
1679 | int rc; | |
1680 | ||
1da177e4 LT |
1681 | spin_lock_irqsave(&ub_lock, flags); |
1682 | if (atomic_read(&sc->poison)) { | |
1683 | spin_unlock_irqrestore(&ub_lock, flags); | |
1684 | return -ENXIO; | |
1685 | } | |
1686 | sc->openc++; | |
1687 | spin_unlock_irqrestore(&ub_lock, flags); | |
1688 | ||
f4800078 | 1689 | if (lun->removable || lun->readonly) |
1da177e4 LT |
1690 | check_disk_change(inode->i_bdev); |
1691 | ||
1692 | /* | |
1693 | * sd.c considers ->media_present and ->changed to be non-equivalent, |
1694 | * under some pretty murky conditions (a failure of READ CAPACITY). | |
1695 | * We may need it one day. | |
1696 | */ | |
f4800078 | 1697 | if (lun->removable && lun->changed && !(filp->f_flags & O_NDELAY)) { |
1da177e4 LT |
1698 | rc = -ENOMEDIUM; |
1699 | goto err_open; | |
1700 | } | |
1701 | ||
f4800078 | 1702 | if (lun->readonly && (filp->f_mode & FMODE_WRITE)) { |
1da177e4 LT |
1703 | rc = -EROFS; |
1704 | goto err_open; | |
1705 | } | |
1706 | ||
1707 | return 0; | |
1708 | ||
1709 | err_open: | |
1710 | ub_put(sc); | |
1711 | return rc; | |
1712 | } | |
1713 | ||
1714 | /* The close counterpart of ub_bd_open; it drops the device reference. |
1715 | */ |
1716 | static int ub_bd_release(struct inode *inode, struct file *filp) | |
1717 | { | |
1718 | struct gendisk *disk = inode->i_bdev->bd_disk; | |
f4800078 PZ |
1719 | struct ub_lun *lun = disk->private_data; |
1720 | struct ub_dev *sc = lun->udev; | |
1da177e4 LT |
1721 | |
1722 | ub_put(sc); | |
1723 | return 0; | |
1724 | } | |
1725 | ||
1726 | /* | |
1727 | * The ioctl interface. | |
1728 | */ | |
1729 | static int ub_bd_ioctl(struct inode *inode, struct file *filp, | |
1730 | unsigned int cmd, unsigned long arg) | |
1731 | { | |
1732 | struct gendisk *disk = inode->i_bdev->bd_disk; | |
1733 | void __user *usermem = (void __user *) arg; | |
1734 | ||
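| /* |
|  * Everything is delegated to the common SCSI ioctl handler, which |
|  * implements SG_IO among others. A rough userspace sketch, assuming the |
|  * first LUN registered as uba (the sg_io_hdr setup is elided; see sg(4)): |
|  * |
|  *     int fd = open("/dev/uba", O_RDWR); |
|  *     struct sg_io_hdr io_hdr;          // fill in per sg(4) |
|  *     ioctl(fd, SG_IO, &io_hdr); |
|  */ |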
45e79a3a | 1735 | return scsi_cmd_ioctl(filp, disk->queue, disk, cmd, usermem); |
1da177e4 LT |
1736 | } |
1737 | ||
1738 | /* | |
9029b174 | 1739 | * This is called by check_disk_change if we reported a media change. |
1da177e4 LT |
1740 | * The main objective here is to discover the features of the media such as |
1741 | * the capacity, read-only status, etc. USB storage generally does not | |
1742 | * need to be spun up, but if we needed it, this would be the place. | |
1743 | * | |
1744 | * This call can sleep. | |
1745 | * | |
1746 | * The return code is not used. | |
1747 | */ | |
1748 | static int ub_bd_revalidate(struct gendisk *disk) | |
1749 | { | |
f4800078 PZ |
1750 | struct ub_lun *lun = disk->private_data; |
1751 | ||
1752 | ub_revalidate(lun->udev, lun); | |
1da177e4 LT |
1753 | |
1754 | /* XXX Support sector size switching like in sr.c */ | |
f4800078 PZ |
1755 | blk_queue_hardsect_size(disk->queue, lun->capacity.bsize); |
1756 | set_capacity(disk, lun->capacity.nsec); | |
1757 | // set_disk_ro(sdkp->disk, lun->readonly); | |
1da177e4 LT |
1758 | |
1759 | return 0; | |
1760 | } | |
1761 | ||
1762 | /* | |
1763 | * The check is called by the block layer to verify if the media | |
1764 | * is still available. It is supposed to be harmless, lightweight and | |
1765 | * non-intrusive in case the media was not changed. | |
1766 | * | |
1767 | * This call can sleep. | |
1768 | * | |
1769 | * The return code is bool! | |
1770 | */ | |
1771 | static int ub_bd_media_changed(struct gendisk *disk) | |
1772 | { | |
f4800078 | 1773 | struct ub_lun *lun = disk->private_data; |
1da177e4 | 1774 | |
f4800078 | 1775 | if (!lun->removable) |
1da177e4 LT |
1776 | return 0; |
1777 | ||
1778 | /* | |
1779 | * We always clear checks after every command, so this is not |
1780 | * as dangerous as it looks. If TEST_UNIT_READY fails here, |
1781 | * the device is genuinely not ready, and operator or software |
1782 | * intervention is required. One dangerous case is a drive which |
1783 | * spins itself down: when the time comes to write dirty pages, the |
1784 | * write fails and the block layer discards the data. Since we never |
1785 | * spin drives up, such devices simply cannot be used with ub anyway. |
1786 | */ | |
f4800078 PZ |
1787 | if (ub_sync_tur(lun->udev, lun) != 0) { |
1788 | lun->changed = 1; | |
1da177e4 LT |
1789 | return 1; |
1790 | } | |
1791 | ||
f4800078 | 1792 | return lun->changed; |
1da177e4 LT |
1793 | } |
1794 | ||
1795 | static struct block_device_operations ub_bd_fops = { | |
1796 | .owner = THIS_MODULE, | |
1797 | .open = ub_bd_open, | |
1798 | .release = ub_bd_release, | |
1799 | .ioctl = ub_bd_ioctl, | |
1800 | .media_changed = ub_bd_media_changed, | |
1801 | .revalidate_disk = ub_bd_revalidate, | |
1802 | }; | |
1803 | ||
1804 | /* | |
1805 | * Common ->done routine for commands executed synchronously. | |
1806 | */ | |
1807 | static void ub_probe_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd) | |
1808 | { | |
1809 | struct completion *cop = cmd->back; | |
1810 | complete(cop); | |
1811 | } | |
1812 | ||
1813 | /* | |
1814 | * Test if the device has a check condition on it, synchronously. | |
1815 | */ | |
f4800078 | 1816 | static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun) |
1da177e4 LT |
1817 | { |
1818 | struct ub_scsi_cmd *cmd; | |
1819 | enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) }; | |
1820 | unsigned long flags; | |
1821 | struct completion compl; | |
1822 | int rc; | |
1823 | ||
1824 | init_completion(&compl); | |
1825 | ||
1826 | rc = -ENOMEM; | |
29da7937 | 1827 | if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL) |
1da177e4 | 1828 | goto err_alloc; |
1da177e4 LT |
1829 | |
1830 | cmd->cdb[0] = TEST_UNIT_READY; | |
1831 | cmd->cdb_len = 6; | |
1832 | cmd->dir = UB_DIR_NONE; | |
1833 | cmd->state = UB_CMDST_INIT; | |
f4800078 | 1834 | cmd->lun = lun; /* This may be NULL, but that's ok */ |
1da177e4 LT |
1835 | cmd->done = ub_probe_done; |
1836 | cmd->back = &compl; | |
1837 | ||
65b4fe55 | 1838 | spin_lock_irqsave(sc->lock, flags); |
1da177e4 LT |
1839 | cmd->tag = sc->tagcnt++; |
1840 | ||
1841 | rc = ub_submit_scsi(sc, cmd); | |
65b4fe55 | 1842 | spin_unlock_irqrestore(sc->lock, flags); |
1da177e4 | 1843 | |
b5600339 | 1844 | if (rc != 0) |
1da177e4 | 1845 | goto err_submit; |
1da177e4 LT |
1846 | |
1847 | wait_for_completion(&compl); | |
1848 | ||
1849 | rc = cmd->error; | |
1850 | ||
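| /* On a check condition, return the sense key as a positive value so callers can tell UNIT ATTENTION (0x6) from transport errors. */ |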
1851 | if (rc == -EIO && cmd->key != 0) /* Retries for benh's key */ | |
1852 | rc = cmd->key; | |
1853 | ||
1854 | err_submit: | |
1855 | kfree(cmd); | |
1856 | err_alloc: | |
1857 | return rc; | |
1858 | } | |
1859 | ||
1860 | /* | |
1861 | * Read the SCSI capacity synchronously (for probing). | |
1862 | */ | |
f4800078 PZ |
1863 | static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun, |
1864 | struct ub_capacity *ret) | |
1da177e4 LT |
1865 | { |
1866 | struct ub_scsi_cmd *cmd; | |
a1cf96ef | 1867 | struct scatterlist *sg; |
1da177e4 LT |
1868 | char *p; |
1869 | enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 }; | |
1870 | unsigned long flags; | |
1871 | unsigned int bsize, shift; | |
1872 | unsigned long nsec; | |
1873 | struct completion compl; | |
1874 | int rc; | |
1875 | ||
1876 | init_completion(&compl); | |
1877 | ||
1878 | rc = -ENOMEM; | |
29da7937 | 1879 | if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL) |
1da177e4 | 1880 | goto err_alloc; |
1da177e4 LT |
1881 | p = (char *)cmd + sizeof(struct ub_scsi_cmd); |
1882 | ||
1883 | cmd->cdb[0] = 0x25; | |
1884 | cmd->cdb_len = 10; | |
1885 | cmd->dir = UB_DIR_READ; | |
1886 | cmd->state = UB_CMDST_INIT; | |
a1cf96ef PZ |
1887 | cmd->nsg = 1; |
1888 | sg = &cmd->sgv[0]; | |
4f33a9d9 | 1889 | sg_init_table(sg, UB_MAX_REQ_SG); |
642f1490 | 1890 | sg_set_page(sg, virt_to_page(p), 8, (unsigned long)p & (PAGE_SIZE-1)); |
1da177e4 | 1891 | cmd->len = 8; |
f4800078 | 1892 | cmd->lun = lun; |
1da177e4 LT |
1893 | cmd->done = ub_probe_done; |
1894 | cmd->back = &compl; | |
1895 | ||
65b4fe55 | 1896 | spin_lock_irqsave(sc->lock, flags); |
1da177e4 LT |
1897 | cmd->tag = sc->tagcnt++; |
1898 | ||
1899 | rc = ub_submit_scsi(sc, cmd); | |
65b4fe55 | 1900 | spin_unlock_irqrestore(sc->lock, flags); |
1da177e4 | 1901 | |
b5600339 | 1902 | if (rc != 0) |
1da177e4 | 1903 | goto err_submit; |
1da177e4 LT |
1904 | |
1905 | wait_for_completion(&compl); | |
1906 | ||
1907 | if (cmd->error != 0) { | |
1da177e4 LT |
1908 | rc = -EIO; |
1909 | goto err_read; | |
1910 | } | |
1911 | if (cmd->act_len != 8) { | |
1da177e4 LT |
1912 | rc = -EIO; |
1913 | goto err_read; | |
1914 | } | |
1915 | ||
1916 | /* sd.c special-cases sector size of 0 to mean 512. Needed? Safe? */ | |
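| /* READ CAPACITY(10) returns the LBA of the last block, hence the +1. */ |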
1917 | nsec = be32_to_cpu(*(__be32 *)p) + 1; | |
1918 | bsize = be32_to_cpu(*(__be32 *)(p + 4)); | |
1919 | switch (bsize) { | |
1920 | case 512: shift = 0; break; | |
1921 | case 1024: shift = 1; break; | |
1922 | case 2048: shift = 2; break; | |
1923 | case 4096: shift = 3; break; | |
1924 | default: | |
1da177e4 LT |
1925 | rc = -EDOM; |
1926 | goto err_inv_bsize; | |
1927 | } | |
1928 | ||
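| /* Keep nsec in 512-byte sectors; that is what set_capacity and the block layer expect. */ |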
1929 | ret->bsize = bsize; | |
1930 | ret->bshift = shift; | |
1931 | ret->nsec = nsec << shift; | |
1932 | rc = 0; | |
1933 | ||
1934 | err_inv_bsize: | |
1935 | err_read: | |
1936 | err_submit: | |
1937 | kfree(cmd); | |
1938 | err_alloc: | |
1939 | return rc; | |
1940 | } | |
1941 | ||
1942 | /* Completion and timeout callbacks for synchronous probe-time requests. |
1943 | */ |
7d12e780 | 1944 | static void ub_probe_urb_complete(struct urb *urb) |
1da177e4 LT |
1945 | { |
1946 | struct completion *cop = urb->context; | |
1947 | complete(cop); | |
1948 | } | |
1949 | ||
1950 | static void ub_probe_timeout(unsigned long arg) | |
1951 | { | |
1952 | struct completion *cop = (struct completion *) arg; | |
1953 | complete(cop); | |
1954 | } | |
1955 | ||
2c2e4a2e PZ |
1956 | /* |
1957 | * Reset with a Bulk reset. | |
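|  * This is the class-specific Bulk-Only Mass Storage Reset; the port itself is not reset. |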
1958 | */ | |
1959 | static int ub_sync_reset(struct ub_dev *sc) | |
1960 | { | |
1961 | int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber; | |
1962 | struct usb_ctrlrequest *cr; | |
1963 | struct completion compl; | |
1964 | struct timer_list timer; | |
1965 | int rc; | |
1966 | ||
1967 | init_completion(&compl); | |
1968 | ||
1969 | cr = &sc->work_cr; | |
1970 | cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE; | |
1971 | cr->bRequest = US_BULK_RESET_REQUEST; | |
1972 | cr->wValue = cpu_to_le16(0); | |
1973 | cr->wIndex = cpu_to_le16(ifnum); | |
1974 | cr->wLength = cpu_to_le16(0); | |
1975 | ||
1976 | usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe, | |
1977 | (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl); | |
2c2e4a2e PZ |
1978 | |
1979 | if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) { | |
1980 | printk(KERN_WARNING | |
1981 | "%s: Unable to submit a bulk reset (%d)\n", sc->name, rc); | |
1982 | return rc; | |
1983 | } | |
1984 | ||
1985 | init_timer(&timer); | |
1986 | timer.function = ub_probe_timeout; | |
1987 | timer.data = (unsigned long) &compl; | |
1988 | timer.expires = jiffies + UB_CTRL_TIMEOUT; | |
1989 | add_timer(&timer); | |
1990 | ||
1991 | wait_for_completion(&compl); | |
1992 | ||
1993 | del_timer_sync(&timer); | |
1994 | usb_kill_urb(&sc->work_urb); | |
1995 | ||
1996 | return sc->work_urb.status; | |
1997 | } | |
1998 | ||
f4800078 PZ |
1999 | /* |
2000 | * Get number of LUNs by the way of Bulk GetMaxLUN command. | |
2001 | */ | |
2002 | static int ub_sync_getmaxlun(struct ub_dev *sc) | |
2003 | { | |
2004 | int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber; | |
2005 | unsigned char *p; | |
2006 | enum { ALLOC_SIZE = 1 }; | |
2007 | struct usb_ctrlrequest *cr; | |
2008 | struct completion compl; | |
2009 | struct timer_list timer; | |
2010 | int nluns; | |
2011 | int rc; | |
2012 | ||
2013 | init_completion(&compl); | |
2014 | ||
2015 | rc = -ENOMEM; | |
2016 | if ((p = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL) | |
2017 | goto err_alloc; | |
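| /* 55 is an arbitrary sentinel; if it survives the transfer, the reply is treated as invalid and a single LUN is assumed. */ |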
2018 | *p = 55; | |
2019 | ||
2020 | cr = &sc->work_cr; | |
2021 | cr->bRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE; | |
2022 | cr->bRequest = US_BULK_GET_MAX_LUN; | |
2023 | cr->wValue = cpu_to_le16(0); | |
2024 | cr->wIndex = cpu_to_le16(ifnum); | |
2025 | cr->wLength = cpu_to_le16(1); | |
2026 | ||
2027 | usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe, | |
2028 | (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl); | |
f4800078 | 2029 | |
b5600339 | 2030 | if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) |
f4800078 | 2031 | goto err_submit; |
f4800078 PZ |
2032 | |
2033 | init_timer(&timer); | |
2034 | timer.function = ub_probe_timeout; | |
2035 | timer.data = (unsigned long) &compl; | |
2036 | timer.expires = jiffies + UB_CTRL_TIMEOUT; | |
2037 | add_timer(&timer); | |
2038 | ||
2039 | wait_for_completion(&compl); | |
2040 | ||
2041 | del_timer_sync(&timer); | |
2042 | usb_kill_urb(&sc->work_urb); | |
2043 | ||
b5600339 | 2044 | if ((rc = sc->work_urb.status) < 0) |
64bd8453 | 2045 | goto err_io; |
64bd8453 | 2046 | |
f4800078 | 2047 | if (sc->work_urb.actual_length != 1) { |
f4800078 PZ |
2048 | nluns = 0; |
2049 | } else { | |
2050 | if ((nluns = *p) == 55) { | |
2051 | nluns = 0; | |
2052 | } else { | |
2053 | /* GetMaxLUN returns the maximum LUN number */ | |
2054 | nluns += 1; | |
2055 | if (nluns > UB_MAX_LUNS) | |
2056 | nluns = UB_MAX_LUNS; | |
2057 | } | |
f4800078 PZ |
2058 | } |
2059 | ||
2060 | kfree(p); | |
2061 | return nluns; | |
2062 | ||
64bd8453 | 2063 | err_io: |
f4800078 PZ |
2064 | err_submit: |
2065 | kfree(p); | |
2066 | err_alloc: | |
2067 | return rc; | |
2068 | } | |
2069 | ||
1da177e4 LT |
2070 | /* |
2071 | * Clear initial stalls. | |
2072 | */ | |
2073 | static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe) | |
2074 | { | |
2075 | int endp; | |
2076 | struct usb_ctrlrequest *cr; | |
2077 | struct completion compl; | |
2078 | struct timer_list timer; | |
2079 | int rc; | |
2080 | ||
2081 | init_completion(&compl); | |
2082 | ||
2083 | endp = usb_pipeendpoint(stalled_pipe); | |
2084 | if (usb_pipein (stalled_pipe)) | |
2085 | endp |= USB_DIR_IN; | |
2086 | ||
2087 | cr = &sc->work_cr; | |
2088 | cr->bRequestType = USB_RECIP_ENDPOINT; | |
2089 | cr->bRequest = USB_REQ_CLEAR_FEATURE; | |
2090 | cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT); | |
2091 | cr->wIndex = cpu_to_le16(endp); | |
2092 | cr->wLength = cpu_to_le16(0); | |
2093 | ||
2094 | usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe, | |
2095 | (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl); | |
1da177e4 LT |
2096 | |
2097 | if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) { | |
2098 | printk(KERN_WARNING | |
2099 | "%s: Unable to submit a probe clear (%d)\n", sc->name, rc); | |
2100 | return rc; | |
2101 | } | |
2102 | ||
2103 | init_timer(&timer); | |
2104 | timer.function = ub_probe_timeout; | |
2105 | timer.data = (unsigned long) &compl; | |
2106 | timer.expires = jiffies + UB_CTRL_TIMEOUT; | |
2107 | add_timer(&timer); | |
2108 | ||
2109 | wait_for_completion(&compl); | |
2110 | ||
2111 | del_timer_sync(&timer); | |
2112 | usb_kill_urb(&sc->work_urb); | |
2113 | ||
2114 | /* reset the endpoint toggle */ | |
2115 | usb_settoggle(sc->dev, endp, usb_pipeout(stalled_pipe), 0); |
2116 | ||
2117 | return 0; | |
2118 | } | |
2119 | ||
2120 | /* | |
2121 | * Get the pipe settings. | |
2122 | */ | |
2123 | static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev, | |
2124 | struct usb_interface *intf) | |
2125 | { | |
2126 | struct usb_host_interface *altsetting = intf->cur_altsetting; | |
2127 | struct usb_endpoint_descriptor *ep_in = NULL; | |
2128 | struct usb_endpoint_descriptor *ep_out = NULL; | |
2129 | struct usb_endpoint_descriptor *ep; | |
2130 | int i; | |
2131 | ||
2132 | /* | |
2133 | * Find the endpoints we need. | |
2134 | * We are expecting a minimum of 2 endpoints - in and out (bulk). | |
2135 | * We will ignore any others. | |
2136 | */ | |
2137 | for (i = 0; i < altsetting->desc.bNumEndpoints; i++) { | |
2138 | ep = &altsetting->endpoint[i].desc; | |
2139 | ||
2140 | /* Is it a BULK endpoint? */ | |
2141 | if ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) | |
2142 | == USB_ENDPOINT_XFER_BULK) { | |
2143 | /* BULK in or out? */ | |
643616e6 PZ |
2144 | if (ep->bEndpointAddress & USB_DIR_IN) { |
2145 | if (ep_in == NULL) | |
2146 | ep_in = ep; | |
2147 | } else { | |
2148 | if (ep_out == NULL) | |
2149 | ep_out = ep; | |
2150 | } | |
1da177e4 LT |
2151 | } |
2152 | } | |
2153 | ||
2154 | if (ep_in == NULL || ep_out == NULL) { | |
9029b174 | 2155 | printk(KERN_NOTICE "%s: failed endpoint check\n", sc->name); |
2c26c9e6 | 2156 | return -ENODEV; |
1da177e4 LT |
2157 | } |
2158 | ||
2159 | /* Calculate and store the pipe values */ | |
2160 | sc->send_ctrl_pipe = usb_sndctrlpipe(dev, 0); | |
2161 | sc->recv_ctrl_pipe = usb_rcvctrlpipe(dev, 0); | |
2162 | sc->send_bulk_pipe = usb_sndbulkpipe(dev, | |
2163 | ep_out->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); | |
2164 | sc->recv_bulk_pipe = usb_rcvbulkpipe(dev, | |
2165 | ep_in->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); | |
2166 | ||
2167 | return 0; | |
2168 | } | |
2169 | ||
2170 | /* | |
2171 | * Probing is done in the process context, which allows us to cheat | |
2172 | * and not to build a state machine for the discovery. | |
2173 | */ | |
2174 | static int ub_probe(struct usb_interface *intf, | |
2175 | const struct usb_device_id *dev_id) | |
2176 | { | |
2177 | struct ub_dev *sc; | |
f4800078 | 2178 | int nluns; |
1da177e4 LT |
2179 | int rc; |
2180 | int i; | |
2181 | ||
a00828e9 PZ |
2182 | if (usb_usual_check_type(dev_id, USB_US_TYPE_UB)) |
2183 | return -ENXIO; | |
2184 | ||
1da177e4 | 2185 | rc = -ENOMEM; |
29da7937 | 2186 | if ((sc = kzalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL) |
1da177e4 | 2187 | goto err_core; |
65b4fe55 | 2188 | sc->lock = ub_next_lock(); |
f4800078 | 2189 | INIT_LIST_HEAD(&sc->luns); |
1da177e4 LT |
2190 | usb_init_urb(&sc->work_urb); |
2191 | tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc); | |
2192 | atomic_set(&sc->poison, 0); | |
c4028958 | 2193 | INIT_WORK(&sc->reset_work, ub_reset_task); |
2c26c9e6 | 2194 | init_waitqueue_head(&sc->reset_wait); |
1da177e4 LT |
2195 | |
2196 | init_timer(&sc->work_timer); | |
2197 | sc->work_timer.data = (unsigned long) sc; | |
2198 | sc->work_timer.function = ub_urb_timeout; | |
2199 | ||
2200 | ub_init_completion(&sc->work_done); | |
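| /* Start with the completion marked done: no work URB is in flight yet. */ |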
2201 | sc->work_done.done = 1; /* A little yuk, but oh well... */ | |
2202 | ||
1da177e4 LT |
2203 | sc->dev = interface_to_usbdev(intf); |
2204 | sc->intf = intf; | |
2205 | // sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber; | |
1da177e4 LT |
2206 | usb_set_intfdata(intf, sc); |
2207 | usb_get_dev(sc->dev); | |
77ef6c4d PZ |
2208 | /* |
2209 | * Since we give the interface struct to the block level through | |
2210 | * disk->driverfs_dev, we have to pin it. Otherwise, block_uevent | |
2211 | * oopses on close after a disconnect (kernels 2.6.16 and up). | |
2212 | */ | |
2213 | usb_get_intf(sc->intf); | |
1da177e4 | 2214 | |
f4800078 PZ |
2215 | snprintf(sc->name, 12, DRV_NAME "(%d.%d)", |
2216 | sc->dev->bus->busnum, sc->dev->devnum); | |
2217 | ||
1da177e4 LT |
2218 | /* XXX Verify that we can handle the device (from descriptors) */ |
2219 | ||
2c26c9e6 PZ |
2220 | if (ub_get_pipes(sc, sc->dev, intf) != 0) |
2221 | goto err_dev_desc; | |
1da177e4 | 2222 | |
1da177e4 LT |
2223 | /* |
2224 | * At this point, all USB initialization is done, do upper layer. | |
2225 | * We really hate halfway initialized structures, so from the | |
2226 | * invariants perspective, this ub_dev is fully constructed at | |
2227 | * this point. | |
2228 | */ | |
2229 | ||
2230 | /* | |
2231 | * This is needed to clear toggles. It is a problem only if we do | |
2232 | * `rmmod ub && modprobe ub` without disconnects, but we like that. | |
2233 | */ | |
c6c88834 | 2234 | #if 0 /* iPod Mini fails if we do this (big white iPod works) */ |
1da177e4 LT |
2235 | ub_probe_clear_stall(sc, sc->recv_bulk_pipe); |
2236 | ub_probe_clear_stall(sc, sc->send_bulk_pipe); | |
c6c88834 | 2237 | #endif |
1da177e4 LT |
2238 | |
2239 | /* | |
2240 | * The way this is used by the startup code is a little specific. | |
2241 | * A SCSI check causes a USB stall. Our common case code sees it | |
2242 | * and clears the check, after which the device is ready for use. | |
2243 | * But if a check was not present, any command other than | |
2244 | * TEST_UNIT_READY ends with a lockup (including REQUEST_SENSE). | |
2245 | * | |
2246 | * If we neglect to clear the SCSI check, the first real command fails | |
2247 | * (which is the capacity readout). We clear that and retry, but why |
2248 | * cause spurious retries for no reason? |
2249 | * | |
2250 | * Revalidation may start with its own TEST_UNIT_READY, but that one | |
2251 | * has to succeed, so we clear checks with an additional one here. | |
2252 | * In any case it's not our business how revalidation is implemented. |
2253 | */ | |
b5600339 | 2254 | for (i = 0; i < 3; i++) { /* Retries for the schwag key from KS'04 */ |
f4800078 | 2255 | if ((rc = ub_sync_tur(sc, NULL)) <= 0) break; |
1da177e4 LT |
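| /* 0x6 is the UNIT ATTENTION sense key; retry only for that. */ |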
2256 | if (rc != 0x6) break; |
2257 | msleep(10); | |
2258 | } | |
2259 | ||
f4800078 PZ |
2260 | nluns = 1; |
2261 | for (i = 0; i < 3; i++) { | |
11a223ae | 2262 | if ((rc = ub_sync_getmaxlun(sc)) < 0) |
f4800078 | 2263 | break; |
f4800078 PZ |
2264 | if (rc != 0) { |
2265 | nluns = rc; | |
2266 | break; | |
2267 | } | |
9f793d2c | 2268 | msleep(100); |
f4800078 | 2269 | } |
1da177e4 | 2270 | |
f4800078 PZ |
2271 | for (i = 0; i < nluns; i++) { |
2272 | ub_probe_lun(sc, i); | |
2273 | } | |
2274 | return 0; | |
2275 | ||
2c26c9e6 | 2276 | err_dev_desc: |
f4800078 | 2277 | usb_set_intfdata(intf, NULL); |
77ef6c4d | 2278 | usb_put_intf(sc->intf); |
f4800078 PZ |
2279 | usb_put_dev(sc->dev); |
2280 | kfree(sc); | |
2281 | err_core: | |
2282 | return rc; | |
2283 | } | |
2284 | ||
2285 | static int ub_probe_lun(struct ub_dev *sc, int lnum) | |
2286 | { | |
2287 | struct ub_lun *lun; | |
165125e1 | 2288 | struct request_queue *q; |
f4800078 PZ |
2289 | struct gendisk *disk; |
2290 | int rc; | |
2291 | ||
2292 | rc = -ENOMEM; | |
29da7937 | 2293 | if ((lun = kzalloc(sizeof(struct ub_lun), GFP_KERNEL)) == NULL) |
f4800078 | 2294 | goto err_alloc; |
f4800078 PZ |
2295 | lun->num = lnum; |
2296 | ||
2297 | rc = -ENOSR; | |
2298 | if ((lun->id = ub_id_get()) == -1) | |
2299 | goto err_id; | |
2300 | ||
2301 | lun->udev = sc; | |
f4800078 PZ |
2302 | |
2303 | snprintf(lun->name, 16, DRV_NAME "%c(%d.%d.%d)", | |
2304 | lun->id + 'a', sc->dev->bus->busnum, sc->dev->devnum, lun->num); | |
2305 | ||
2306 | lun->removable = 1; /* XXX Query this from the device */ | |
2307 | lun->changed = 1; /* ub_revalidate clears only */ | |
f4800078 | 2308 | ub_revalidate(sc, lun); |
1da177e4 | 2309 | |
1da177e4 | 2310 | rc = -ENOMEM; |
4fb729f5 | 2311 | if ((disk = alloc_disk(UB_PARTS_PER_LUN)) == NULL) |
1da177e4 LT |
2312 | goto err_diskalloc; |
2313 | ||
f4800078 | 2314 | sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a'); |
1da177e4 | 2315 | disk->major = UB_MAJOR; |
4fb729f5 | 2316 | disk->first_minor = lun->id * UB_PARTS_PER_LUN; |
1da177e4 | 2317 | disk->fops = &ub_bd_fops; |
f4800078 | 2318 | disk->private_data = lun; |
64bd8453 | 2319 | disk->driverfs_dev = &sc->intf->dev; |
1da177e4 LT |
2320 | |
2321 | rc = -ENOMEM; | |
65b4fe55 | 2322 | if ((q = blk_init_queue(ub_request_fn, sc->lock)) == NULL) |
1da177e4 LT |
2323 | goto err_blkqinit; |
2324 | ||
2325 | disk->queue = q; | |
2326 | ||
f4800078 | 2327 | blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); |
1da177e4 LT |
2328 | blk_queue_max_hw_segments(q, UB_MAX_REQ_SG); |
2329 | blk_queue_max_phys_segments(q, UB_MAX_REQ_SG); | |
f4800078 | 2330 | blk_queue_segment_boundary(q, 0xffffffff); /* Dubious. */ |
1da177e4 | 2331 | blk_queue_max_sectors(q, UB_MAX_SECTORS); |
f4800078 | 2332 | blk_queue_hardsect_size(q, lun->capacity.bsize); |
1da177e4 | 2333 | |
688e9fb1 | 2334 | lun->disk = disk; |
f4800078 | 2335 | q->queuedata = lun; |
688e9fb1 | 2336 | list_add(&lun->link, &sc->luns); |
1da177e4 | 2337 | |
f4800078 PZ |
2338 | set_capacity(disk, lun->capacity.nsec); |
2339 | if (lun->removable) | |
1da177e4 LT |
2340 | disk->flags |= GENHD_FL_REMOVABLE; |
2341 | ||
2342 | add_disk(disk); | |
2343 | ||
2344 | return 0; | |
2345 | ||
2346 | err_blkqinit: | |
2347 | put_disk(disk); | |
2348 | err_diskalloc: | |
f4800078 | 2349 | ub_id_put(lun->id); |
1da177e4 | 2350 | err_id: |
f4800078 PZ |
2351 | kfree(lun); |
2352 | err_alloc: | |
1da177e4 LT |
2353 | return rc; |
2354 | } | |
2355 | ||
2356 | static void ub_disconnect(struct usb_interface *intf) | |
2357 | { | |
2358 | struct ub_dev *sc = usb_get_intfdata(intf); | |
f4800078 | 2359 | struct ub_lun *lun; |
1da177e4 LT |
2360 | unsigned long flags; |
2361 | ||
2362 | /* | |
2363 | * Prevent ub_bd_release from pulling the rug from under us. | |
2364 | * XXX This is starting to look like a kref. | |
2365 | * XXX Why not to take this ref at probe time? | |
2366 | */ | |
2367 | spin_lock_irqsave(&ub_lock, flags); | |
2368 | sc->openc++; | |
2369 | spin_unlock_irqrestore(&ub_lock, flags); | |
2370 | ||
2371 | /* | |
9029b174 | 2372 | * Fence stall clearings, operations triggered by unlinkings and so on. |
1da177e4 LT |
2373 | * We do not attempt to unlink any URBs, because we do not trust the |
2374 | * unlink paths in HC drivers. Also, we get -84 upon disconnect anyway. | |
2375 | */ | |
2376 | atomic_set(&sc->poison, 1); | |
2377 | ||
2c26c9e6 PZ |
2378 | /* |
2379 | * Wait for reset to end, if any. | |
2380 | */ | |
2381 | wait_event(sc->reset_wait, !sc->reset); | |
2382 | ||
1da177e4 LT |
2383 | /* |
2384 | * Blow away queued commands. | |
2385 | * | |
2386 | * Actually, this never works, because before we get here | |
2387 | * the HCD terminates outstanding URB(s). It causes our | |
2388 | * SCSI command queue to advance, commands fail to submit, | |
2389 | * and the whole queue drains. So, we just use this code to | |
2390 | * print warnings. | |
2391 | */ | |
65b4fe55 | 2392 | spin_lock_irqsave(sc->lock, flags); |
1da177e4 LT |
2393 | { |
2394 | struct ub_scsi_cmd *cmd; | |
2395 | int cnt = 0; | |
2c26c9e6 | 2396 | while ((cmd = ub_cmdq_peek(sc)) != NULL) { |
1da177e4 LT |
2397 | cmd->error = -ENOTCONN; |
2398 | cmd->state = UB_CMDST_DONE; | |
1da177e4 LT |
2399 | ub_cmdq_pop(sc); |
2400 | (*cmd->done)(sc, cmd); | |
2401 | cnt++; | |
2402 | } | |
2403 | if (cnt != 0) { | |
2404 | printk(KERN_WARNING "%s: " | |
2405 | "%d was queued after shutdown\n", sc->name, cnt); | |
2406 | } | |
2407 | } | |
65b4fe55 | 2408 | spin_unlock_irqrestore(sc->lock, flags); |
1da177e4 LT |
2409 | |
2410 | /* | |
2411 | * Unregister the upper layer. | |
2412 | */ | |
a69228de | 2413 | list_for_each_entry(lun, &sc->luns, link) { |
688e9fb1 | 2414 | del_gendisk(lun->disk); |
f4800078 PZ |
2415 | /* |
2416 | * I wish I could do: | |
75ad23bc | 2417 | * queue_flag_set(QUEUE_FLAG_DEAD, q); |
f4800078 PZ |
2418 | * As it is, we rely on our internal poisoning and let |
2419 | * the upper levels to spin furiously failing all the I/O. | |
2420 | */ | |
2421 | } | |
1da177e4 LT |
2422 | |
2423 | /* | |
1da177e4 LT |
2424 | * Testing for -EINPROGRESS is always a bug, so we are bending |
2425 | * the rules a little. | |
2426 | */ | |
65b4fe55 | 2427 | spin_lock_irqsave(sc->lock, flags); |
1da177e4 LT |
2428 | if (sc->work_urb.status == -EINPROGRESS) { /* janitors: ignore */ |
2429 | printk(KERN_WARNING "%s: " | |
2430 | "URB is active after disconnect\n", sc->name); | |
2431 | } | |
65b4fe55 | 2432 | spin_unlock_irqrestore(sc->lock, flags); |
1da177e4 LT |
2433 | |
2434 | /* | |
9029b174 | 2435 | * There is virtually no chance that another CPU runs a timeout this long |
1da177e4 LT |
2436 | * after ub_urb_complete should have called del_timer, assuming the HCD |
2437 | * didn't forget to deliver a callback on unlink. |
2438 | */ | |
2439 | del_timer_sync(&sc->work_timer); | |
2440 | ||
2441 | /* | |
2442 | * At this point there must be no commands coming from anyone | |
2443 | * and no URBs left in transit. | |
2444 | */ | |
2445 | ||
1da177e4 LT |
2446 | ub_put(sc); |
2447 | } | |
2448 | ||
2449 | static struct usb_driver ub_driver = { | |
1da177e4 LT |
2450 | .name = "ub", |
2451 | .probe = ub_probe, | |
2452 | .disconnect = ub_disconnect, | |
2453 | .id_table = ub_usb_ids, | |
2454 | }; | |
2455 | ||
2456 | static int __init ub_init(void) | |
2457 | { | |
2458 | int rc; | |
65b4fe55 PZ |
2459 | int i; |
2460 | ||
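| /* Queue locks come from this small shared pool; ub_next_lock hands them out to devices. */ |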
2461 | for (i = 0; i < UB_QLOCK_NUM; i++) | |
2462 | spin_lock_init(&ub_qlockv[i]); | |
1da177e4 | 2463 | |
1da177e4 LT |
2464 | if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0) |
2465 | goto err_regblkdev; | |
1da177e4 LT |
2466 | |
2467 | if ((rc = usb_register(&ub_driver)) != 0) | |
2468 | goto err_register; | |
2469 | ||
a00828e9 | 2470 | usb_usual_set_present(USB_US_TYPE_UB); |
1da177e4 LT |
2471 | return 0; |
2472 | ||
2473 | err_register: | |
1da177e4 LT |
2474 | unregister_blkdev(UB_MAJOR, DRV_NAME); |
2475 | err_regblkdev: | |
2476 | return rc; | |
2477 | } | |
2478 | ||
2479 | static void __exit ub_exit(void) | |
2480 | { | |
2481 | usb_deregister(&ub_driver); | |
2482 | ||
1da177e4 | 2483 | unregister_blkdev(UB_MAJOR, DRV_NAME); |
a00828e9 | 2484 | usb_usual_clear_present(USB_US_TYPE_UB); |
1da177e4 LT |
2485 | } |
2486 | ||
2487 | module_init(ub_init); | |
2488 | module_exit(ub_exit); | |
2489 | ||
2490 | MODULE_LICENSE("GPL"); |