2 * f_mass_storage.c -- Mass Storage USB Composite Function
4 * Copyright (C) 2003-2008 Alan Stern
5 * Copyright (C) 2009 Samsung Electronics
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions, and the following disclaimer,
14 * without modification.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The names of the above-listed copyright holders may not be used
19 * to endorse or promote products derived from this software without
20 * specific prior written permission.
22 * ALTERNATIVELY, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") as published by the Free Software
24 * Foundation, either version 2 of that License or (at your option) any
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
28 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
29 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
31 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
32 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
33 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
34 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
35 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
36 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
37 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 * The Mass Storage Function acts as a USB Mass Storage device,
43 * appearing to the host as a disk drive or as a CD-ROM drive. In
44 * addition to providing an example of a genuinely useful composite
45 * function for a USB device, it also illustrates a technique of
46 * double-buffering for increased throughput.
48 * Function supports multiple logical units (LUNs). Backing storage
49 * for each LUN is provided by a regular file or a block device.
50 * Access for each LUN can be limited to read-only. Moreover, the
51 * function can indicate that LUN is removable and/or CD-ROM. (The
52 * later implies read-only access.)
54 * MSF is configured by specifying a fsg_config structure. It has the
57 * nluns Number of LUNs function have (anywhere from 1
58 * to FSG_MAX_LUNS which is 8).
59 * luns An array of LUN configuration values. This
60 * should be filled for each LUN that
61 * function will include (ie. for "nluns"
62 * LUNs). Each element of the array has
63 * the following fields:
64 * ->filename The path to the backing file for the LUN.
65 * Required if LUN is not marked as
67 * ->ro Flag specifying access to the LUN shall be
68 * read-only. This is implied if CD-ROM
69 * emulation is enabled as well as when
70 * it was impossible to open "filename"
72 * ->removable Flag specifying that LUN shall be indicated as
74 * ->cdrom Flag specifying that LUN shall be reported as
77 * lun_name_format A printf-like format for names of the LUN
78 * devices. This determines how the
79 * directory in sysfs will be named.
80 * Unless you are using several MSFs in
81 * a single gadget (as opposed to single
82 * MSF in many configurations) you may
83 * leave it as NULL (in which case
84 * "lun%d" will be used). In the format
85 * you can use "%d" to index LUNs for
86 * MSF's with more than one LUN. (Beware
87 * that there is only one integer given
88 * as an argument for the format and
89 * specifying invalid format may cause
90 * unspecified behaviour.)
91 * thread_name Name of the kernel thread process used by the
92 * MSF. You can safely set it to NULL
93 * (in which case default "file-storage"
98 * release Information used as a reply to INQUIRY
99 * request. To use default set to NULL,
100 * NULL, 0xffff respectively. The first
101 * field should be 8 and the second 16
102 * characters or less.
104 * can_stall Set to permit function to halt bulk endpoints.
105 * Disabled on some USB devices known not
106 * to work correctly. You should set it
109 * If "removable" is not set for a LUN then a backing file must be
110 * specified. If it is set, then NULL filename means the LUN's medium
111 * is not loaded (an empty string as "filename" in the fsg_config
112 * structure causes error). The CD-ROM emulation includes a single
113 * data track and no audio tracks; hence there need be only one
114 * backing file per LUN. Note also that the CD-ROM block length is
115 * set to 512 rather than the more common value 2048.
118 * MSF includes support for module parameters. If gadget using it
119 * decides to use it, the following module parameters will be
122 * file=filename[,filename...]
123 * Names of the files or block devices used for
125 * ro=b[,b...] Default false, boolean for read-only access.
127 * Default true, boolean for removable media.
128 * cdrom=b[,b...] Default false, boolean for whether to emulate
130 * luns=N Default N = number of filenames, number of
132 * stall Default determined according to the type of
133 * USB device controller (usually true),
134 * boolean to permit the driver to halt
137 * The module parameters may be prefixed with some string. You need
138 * to consult gadget's documentation or source to verify whether it is
139 * using those module parameters and if it does what are the prefixes
140 * (look for FSG_MODULE_PARAMETERS() macro usage, what's inside it is
144 * Requirements are modest; only a bulk-in and a bulk-out endpoint are
145 * needed. The memory requirement amounts to two 16K buffers, size
146 * configurable by a parameter. Support is included for both
147 * full-speed and high-speed operation.
149 * Note that the driver is slightly non-portable in that it assumes a
150 * single memory/DMA buffer will be useable for bulk-in, bulk-out, and
151 * interrupt-in endpoints. With most device controllers this isn't an
152 * issue, but there may be some with hardware restrictions that prevent
153 * a buffer from being used by more than one endpoint.
156 * The pathnames of the backing files and the ro settings are
157 * available in the attribute files "file" and "ro" in the lun<n> (or
158 * to be more precise in a directory which name comes from
159 * "lun_name_format" option!) subdirectory of the gadget's sysfs
160 * directory. If the "removable" option is set, writing to these
161 * files will simulate ejecting/loading the medium (writing an empty
162 * line means eject) and adjusting a write-enable tab. Changes to the
163 * ro setting are not allowed when the medium is loaded or if CD-ROM
164 * emulation is being used.
167 * This function is heavily based on "File-backed Storage Gadget" by
168 * Alan Stern which in turn is heavily based on "Gadget Zero" by David
169 * Brownell. The driver's SCSI command interface was based on the
170 * "Information technology - Small Computer System Interface - 2"
171 * document from X3T9.2 Project 375D, Revision 10L, 7-SEP-93,
172 * available at <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>.
173 * The single exception is opcode 0x23 (READ FORMAT CAPACITIES), which
174 * was based on the "Universal Serial Bus Mass Storage Class UFI
175 * Command Specification" document, Revision 1.0, December 14, 1998,
177 * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
184 * The MSF is fairly straightforward. There is a main kernel
185 * thread that handles most of the work. Interrupt routines field
186 * callbacks from the controller driver: bulk- and interrupt-request
187 * completion notifications, endpoint-0 events, and disconnect events.
188 * Completion events are passed to the main thread by wakeup calls. Many
189 * ep0 requests are handled at interrupt time, but SetInterface,
190 * SetConfiguration, and device reset requests are forwarded to the
191 * thread in the form of "exceptions" using SIGUSR1 signals (since they
192 * should interrupt any ongoing file I/O operations).
194 * The thread's main routine implements the standard command/data/status
195 * parts of a SCSI interaction. It and its subroutines are full of tests
196 * for pending signals/exceptions -- all this polling is necessary since
197 * the kernel has no setjmp/longjmp equivalents. (Maybe this is an
198 * indication that the driver really wants to be running in userspace.)
199 * An important point is that so long as the thread is alive it keeps an
200 * open reference to the backing file. This will prevent unmounting
201 * the backing file's underlying filesystem and could cause problems
202 * during system shutdown, for example. To prevent such problems, the
203 * thread catches INT, TERM, and KILL signals and converts them into
206 * In normal operation the main thread is started during the gadget's
207 * fsg_bind() callback and stopped during fsg_unbind(). But it can
208 * also exit when it receives a signal, and there's no point leaving
209 * the gadget running when the thread is dead. At of this moment, MSF
210 * provides no way to deregister the gadget when thread dies -- maybe
211 * a callback functions is needed.
213 * To provide maximum throughput, the driver uses a circular pipeline of
214 * buffer heads (struct fsg_buffhd). In principle the pipeline can be
215 * arbitrarily long; in practice the benefits don't justify having more
216 * than 2 stages (i.e., double buffering). But it helps to think of the
217 * pipeline as being a long one. Each buffer head contains a bulk-in and
218 * a bulk-out request pointer (since the buffer can be used for both
219 * output and input -- directions always are given from the host's
220 * point of view) as well as a pointer to the buffer and various state
223 * Use of the pipeline follows a simple protocol. There is a variable
224 * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
225 * At any time that buffer head may still be in use from an earlier
226 * request, so each buffer head has a state variable indicating whether
227 * it is EMPTY, FULL, or BUSY. Typical use involves waiting for the
228 * buffer head to be EMPTY, filling the buffer either by file I/O or by
229 * USB I/O (during which the buffer head is BUSY), and marking the buffer
230 * head FULL when the I/O is complete. Then the buffer will be emptied
231 * (again possibly by USB I/O, during which it is marked BUSY) and
232 * finally marked EMPTY again (possibly by a completion routine).
234 * A module parameter tells the driver to avoid stalling the bulk
235 * endpoints wherever the transport specification allows. This is
236 * necessary for some UDCs like the SuperH, which cannot reliably clear a
237 * halt on a bulk endpoint. However, under certain circumstances the
238 * Bulk-only specification requires a stall. In such cases the driver
239 * will halt the endpoint and set a flag indicating that it should clear
240 * the halt in software during the next device reset. Hopefully this
241 * will permit everything to work correctly. Furthermore, although the
242 * specification allows the bulk-out endpoint to halt when the host sends
243 * too much data, implementing this would cause an unavoidable race.
244 * The driver will always use the "no-stall" approach for OUT transfers.
246 * One subtle point concerns sending status-stage responses for ep0
247 * requests. Some of these requests, such as device reset, can involve
248 * interrupting an ongoing file I/O operation, which might take an
249 * arbitrarily long time. During that delay the host might give up on
250 * the original ep0 request and issue a new one. When that happens the
251 * driver should not notify the host about completion of the original
252 * request, as the host will no longer be waiting for it. So the driver
253 * assigns to each ep0 request a unique tag, and it keeps track of the
254 * tag value of the request associated with a long-running exception
255 * (device-reset, interface-change, or configuration-change). When the
256 * exception handler is finished, the status-stage response is submitted
257 * only if the current ep0 request tag is equal to the exception request
258 * tag. Thus only the most recently received ep0 request will get a
259 * status-stage response.
261 * Warning: This driver source file is too long. It ought to be split up
262 * into a header file plus about 3 separate .c files, to handle the details
263 * of the Gadget, USB Mass Storage, and SCSI protocols.
267 /* #define VERBOSE_DEBUG */
268 /* #define DUMP_MSGS */
271 #include <linux/blkdev.h>
272 #include <linux/completion.h>
273 #include <linux/dcache.h>
274 #include <linux/delay.h>
275 #include <linux/device.h>
276 #include <linux/fcntl.h>
277 #include <linux/file.h>
278 #include <linux/fs.h>
279 #include <linux/kref.h>
280 #include <linux/kthread.h>
281 #include <linux/limits.h>
282 #include <linux/rwsem.h>
283 #include <linux/slab.h>
284 #include <linux/spinlock.h>
285 #include <linux/string.h>
286 #include <linux/freezer.h>
287 #include <linux/utsname.h>
289 #include <linux/usb/ch9.h>
290 #include <linux/usb/gadget.h>
292 #include "gadget_chips.h"
296 /*------------------------------------------------------------------------*/
298 #define FSG_DRIVER_DESC "Mass Storage Function"
299 #define FSG_DRIVER_VERSION "2009/09/11"
301 static const char fsg_string_interface[] = "Mass Storage";
304 #define FSG_NO_INTR_EP 1
305 #define FSG_BUFFHD_STATIC_BUFFER 1
306 #define FSG_NO_DEVICE_STRINGS 1
308 #define FSG_NO_INTR_EP 1
310 #include "storage_common.c"
313 /*-------------------------------------------------------------------------*/
318 /* Data shared by all the FSG instances. */
320 struct usb_gadget *gadget;
322 struct fsg_dev *prev_fsg;
324 /* filesem protects: backing files in use */
325 struct rw_semaphore filesem;
327 /* lock protects: state, all the req_busy's */
330 struct usb_ep *ep0; /* Copy of gadget->ep0 */
331 struct usb_request *ep0req; /* Copy of cdev->req */
332 unsigned int ep0_req_tag;
333 const char *ep0req_name;
335 struct fsg_buffhd *next_buffhd_to_fill;
336 struct fsg_buffhd *next_buffhd_to_drain;
337 struct fsg_buffhd buffhds[FSG_NUM_BUFFERS];
340 u8 cmnd[MAX_COMMAND_SIZE];
344 struct fsg_lun *luns;
345 struct fsg_lun *curlun;
347 unsigned int bulk_out_maxpacket;
348 enum fsg_state state; /* For exception handling */
349 unsigned int exception_req_tag;
351 u8 config, new_config;
352 enum data_direction data_dir;
354 u32 data_size_from_cmnd;
359 unsigned int can_stall:1;
360 unsigned int free_storage_on_release:1;
361 unsigned int phase_error:1;
362 unsigned int short_packet_received:1;
363 unsigned int bad_lun_okay:1;
364 unsigned int running:1;
366 int thread_wakeup_needed;
367 struct completion thread_notifier;
368 struct task_struct *thread_task;
370 /* Callback function to call when thread exits. */
371 int (*thread_exits)(struct fsg_common *common);
372 /* Gadget's private data. */
375 /* Vendor (8 chars), product (16 chars), release (4
376 * hexadecimal digits) and NUL byte */
377 char inquiry_string[8 + 16 + 4 + 1];
385 struct fsg_lun_config {
386 const char *filename;
390 } luns[FSG_MAX_LUNS];
392 const char *lun_name_format;
393 const char *thread_name;
395 /* Callback function to call when thread exits. If no
396 * callback is set or it returns value lower then zero MSF
397 * will force eject all LUNs it operates on (including those
398 * marked as non-removable or with prevent_medium_removal flag
400 int (*thread_exits)(struct fsg_common *common);
401 /* Gadget's private data. */
404 const char *vendor_name; /* 8 characters or less */
405 const char *product_name; /* 16 characters or less */
413 struct usb_function function;
414 struct usb_gadget *gadget; /* Copy of cdev->gadget */
415 struct fsg_common *common;
417 u16 interface_number;
419 unsigned int bulk_in_enabled:1;
420 unsigned int bulk_out_enabled:1;
422 unsigned long atomic_bitflags;
423 #define IGNORE_BULK_OUT 0
425 struct usb_ep *bulk_in;
426 struct usb_ep *bulk_out;
430 static inline int __fsg_is_set(struct fsg_common *common,
431 const char *func, unsigned line)
435 ERROR(common, "common->fsg is NULL in %s at %u\n", func, line);
439 #define fsg_is_set(common) likely(__fsg_is_set(common, __func__, __LINE__))
442 static inline struct fsg_dev *fsg_from_func(struct usb_function *f)
444 return container_of(f, struct fsg_dev, function);
448 typedef void (*fsg_routine_t)(struct fsg_dev *);
450 static int exception_in_progress(struct fsg_common *common)
452 return common->state > FSG_STATE_IDLE;
455 /* Make bulk-out requests be divisible by the maxpacket size */
456 static void set_bulk_out_req_length(struct fsg_common *common,
457 struct fsg_buffhd *bh, unsigned int length)
461 bh->bulk_out_intended_length = length;
462 rem = length % common->bulk_out_maxpacket;
464 length += common->bulk_out_maxpacket - rem;
465 bh->outreq->length = length;
468 /*-------------------------------------------------------------------------*/
470 static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
474 if (ep == fsg->bulk_in)
476 else if (ep == fsg->bulk_out)
480 DBG(fsg, "%s set halt\n", name);
481 return usb_ep_set_halt(ep);
485 /*-------------------------------------------------------------------------*/
487 /* These routines may be called in process context or in_irq */
489 /* Caller must hold fsg->lock */
490 static void wakeup_thread(struct fsg_common *common)
492 /* Tell the main thread that something has happened */
493 common->thread_wakeup_needed = 1;
494 if (common->thread_task)
495 wake_up_process(common->thread_task);
499 static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
503 /* Do nothing if a higher-priority exception is already in progress.
504 * If a lower-or-equal priority exception is in progress, preempt it
505 * and notify the main thread by sending it a signal. */
506 spin_lock_irqsave(&common->lock, flags);
507 if (common->state <= new_state) {
508 common->exception_req_tag = common->ep0_req_tag;
509 common->state = new_state;
510 if (common->thread_task)
511 send_sig_info(SIGUSR1, SEND_SIG_FORCED,
512 common->thread_task);
514 spin_unlock_irqrestore(&common->lock, flags);
518 /*-------------------------------------------------------------------------*/
520 static int ep0_queue(struct fsg_common *common)
524 rc = usb_ep_queue(common->ep0, common->ep0req, GFP_ATOMIC);
525 common->ep0->driver_data = common;
526 if (rc != 0 && rc != -ESHUTDOWN) {
527 /* We can't do much more than wait for a reset */
528 WARNING(common, "error in submission: %s --> %d\n",
529 common->ep0->name, rc);
534 /*-------------------------------------------------------------------------*/
536 /* Bulk and interrupt endpoint completion handlers.
537 * These always run in_irq. */
539 static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
541 struct fsg_common *common = ep->driver_data;
542 struct fsg_buffhd *bh = req->context;
544 if (req->status || req->actual != req->length)
545 DBG(common, "%s --> %d, %u/%u\n", __func__,
546 req->status, req->actual, req->length);
547 if (req->status == -ECONNRESET) /* Request was cancelled */
548 usb_ep_fifo_flush(ep);
550 /* Hold the lock while we update the request and buffer states */
552 spin_lock(&common->lock);
554 bh->state = BUF_STATE_EMPTY;
555 wakeup_thread(common);
556 spin_unlock(&common->lock);
559 static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
561 struct fsg_common *common = ep->driver_data;
562 struct fsg_buffhd *bh = req->context;
564 dump_msg(common, "bulk-out", req->buf, req->actual);
565 if (req->status || req->actual != bh->bulk_out_intended_length)
566 DBG(common, "%s --> %d, %u/%u\n", __func__,
567 req->status, req->actual,
568 bh->bulk_out_intended_length);
569 if (req->status == -ECONNRESET) /* Request was cancelled */
570 usb_ep_fifo_flush(ep);
572 /* Hold the lock while we update the request and buffer states */
574 spin_lock(&common->lock);
576 bh->state = BUF_STATE_FULL;
577 wakeup_thread(common);
578 spin_unlock(&common->lock);
582 /*-------------------------------------------------------------------------*/
584 /* Ep0 class-specific handlers. These always run in_irq. */
586 static int fsg_setup(struct usb_function *f,
587 const struct usb_ctrlrequest *ctrl)
589 struct fsg_dev *fsg = fsg_from_func(f);
590 struct usb_request *req = fsg->common->ep0req;
591 u16 w_index = le16_to_cpu(ctrl->wIndex);
592 u16 w_value = le16_to_cpu(ctrl->wValue);
593 u16 w_length = le16_to_cpu(ctrl->wLength);
595 if (!fsg->common->config)
598 switch (ctrl->bRequest) {
600 case USB_BULK_RESET_REQUEST:
601 if (ctrl->bRequestType !=
602 (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
604 if (w_index != fsg->interface_number || w_value != 0)
607 /* Raise an exception to stop the current operation
608 * and reinitialize our state. */
609 DBG(fsg, "bulk reset request\n");
610 raise_exception(fsg->common, FSG_STATE_RESET);
611 return DELAYED_STATUS;
613 case USB_BULK_GET_MAX_LUN_REQUEST:
614 if (ctrl->bRequestType !=
615 (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
617 if (w_index != fsg->interface_number || w_value != 0)
619 VDBG(fsg, "get max LUN\n");
620 *(u8 *) req->buf = fsg->common->nluns - 1;
622 /* Respond with data/status */
623 req->length = min((u16)1, w_length);
624 fsg->common->ep0req_name =
625 ctrl->bRequestType & USB_DIR_IN ? "ep0-in" : "ep0-out";
626 return ep0_queue(fsg->common);
630 "unknown class-specific control req "
631 "%02x.%02x v%04x i%04x l%u\n",
632 ctrl->bRequestType, ctrl->bRequest,
633 le16_to_cpu(ctrl->wValue), w_index, w_length);
638 /*-------------------------------------------------------------------------*/
640 /* All the following routines run in process context */
643 /* Use this for bulk or interrupt transfers, not ep0 */
644 static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
645 struct usb_request *req, int *pbusy,
646 enum fsg_buffer_state *state)
650 if (ep == fsg->bulk_in)
651 dump_msg(fsg, "bulk-in", req->buf, req->length);
653 spin_lock_irq(&fsg->common->lock);
655 *state = BUF_STATE_BUSY;
656 spin_unlock_irq(&fsg->common->lock);
657 rc = usb_ep_queue(ep, req, GFP_KERNEL);
660 *state = BUF_STATE_EMPTY;
662 /* We can't do much more than wait for a reset */
664 /* Note: currently the net2280 driver fails zero-length
665 * submissions if DMA is enabled. */
666 if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP &&
668 WARNING(fsg, "error in submission: %s --> %d\n",
673 #define START_TRANSFER_OR(common, ep_name, req, pbusy, state) \
674 if (fsg_is_set(common)) \
675 start_transfer((common)->fsg, (common)->fsg->ep_name, \
676 req, pbusy, state); \
679 #define START_TRANSFER(common, ep_name, req, pbusy, state) \
680 START_TRANSFER_OR(common, ep_name, req, pbusy, state) (void)0
684 static int sleep_thread(struct fsg_common *common)
688 /* Wait until a signal arrives or we are woken up */
691 set_current_state(TASK_INTERRUPTIBLE);
692 if (signal_pending(current)) {
696 if (common->thread_wakeup_needed)
700 __set_current_state(TASK_RUNNING);
701 common->thread_wakeup_needed = 0;
706 /*-------------------------------------------------------------------------*/
708 static int do_read(struct fsg_common *common)
710 struct fsg_lun *curlun = common->curlun;
712 struct fsg_buffhd *bh;
715 loff_t file_offset, file_offset_tmp;
717 unsigned int partial_page;
720 /* Get the starting Logical Block Address and check that it's
722 if (common->cmnd[0] == SC_READ_6)
723 lba = get_unaligned_be24(&common->cmnd[1]);
725 lba = get_unaligned_be32(&common->cmnd[2]);
727 /* We allow DPO (Disable Page Out = don't save data in the
728 * cache) and FUA (Force Unit Access = don't read from the
729 * cache), but we don't implement them. */
730 if ((common->cmnd[1] & ~0x18) != 0) {
731 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
735 if (lba >= curlun->num_sectors) {
736 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
739 file_offset = ((loff_t) lba) << 9;
741 /* Carry out the file reads */
742 amount_left = common->data_size_from_cmnd;
743 if (unlikely(amount_left == 0))
744 return -EIO; /* No default reply */
748 /* Figure out how much we need to read:
749 * Try to read the remaining amount.
750 * But don't read more than the buffer size.
751 * And don't try to read past the end of the file.
752 * Finally, if we're not at a page boundary, don't read past
754 * If this means reading 0 then we were asked to read past
755 * the end of file. */
756 amount = min(amount_left, FSG_BUFLEN);
757 amount = min((loff_t) amount,
758 curlun->file_length - file_offset);
759 partial_page = file_offset & (PAGE_CACHE_SIZE - 1);
760 if (partial_page > 0)
761 amount = min(amount, (unsigned int) PAGE_CACHE_SIZE -
764 /* Wait for the next buffer to become available */
765 bh = common->next_buffhd_to_fill;
766 while (bh->state != BUF_STATE_EMPTY) {
767 rc = sleep_thread(common);
772 /* If we were asked to read past the end of file,
773 * end with an empty buffer. */
776 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
777 curlun->sense_data_info = file_offset >> 9;
778 curlun->info_valid = 1;
779 bh->inreq->length = 0;
780 bh->state = BUF_STATE_FULL;
784 /* Perform the read */
785 file_offset_tmp = file_offset;
786 nread = vfs_read(curlun->filp,
787 (char __user *) bh->buf,
788 amount, &file_offset_tmp);
789 VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
790 (unsigned long long) file_offset,
792 if (signal_pending(current))
796 LDBG(curlun, "error in file read: %d\n",
799 } else if (nread < amount) {
800 LDBG(curlun, "partial file read: %d/%u\n",
801 (int) nread, amount);
802 nread -= (nread & 511); /* Round down to a block */
804 file_offset += nread;
805 amount_left -= nread;
806 common->residue -= nread;
807 bh->inreq->length = nread;
808 bh->state = BUF_STATE_FULL;
810 /* If an error occurred, report it and its position */
811 if (nread < amount) {
812 curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
813 curlun->sense_data_info = file_offset >> 9;
814 curlun->info_valid = 1;
818 if (amount_left == 0)
819 break; /* No more left to read */
821 /* Send this buffer and go read some more */
823 START_TRANSFER_OR(common, bulk_in, bh->inreq,
824 &bh->inreq_busy, &bh->state)
825 /* Don't know what to do if
826 * common->fsg is NULL */
828 common->next_buffhd_to_fill = bh->next;
831 return -EIO; /* No default reply */
835 /*-------------------------------------------------------------------------*/
837 static int do_write(struct fsg_common *common)
839 struct fsg_lun *curlun = common->curlun;
841 struct fsg_buffhd *bh;
843 u32 amount_left_to_req, amount_left_to_write;
844 loff_t usb_offset, file_offset, file_offset_tmp;
846 unsigned int partial_page;
851 curlun->sense_data = SS_WRITE_PROTECTED;
854 spin_lock(&curlun->filp->f_lock);
855 curlun->filp->f_flags &= ~O_SYNC; /* Default is not to wait */
856 spin_unlock(&curlun->filp->f_lock);
858 /* Get the starting Logical Block Address and check that it's
860 if (common->cmnd[0] == SC_WRITE_6)
861 lba = get_unaligned_be24(&common->cmnd[1]);
863 lba = get_unaligned_be32(&common->cmnd[2]);
865 /* We allow DPO (Disable Page Out = don't save data in the
866 * cache) and FUA (Force Unit Access = write directly to the
867 * medium). We don't implement DPO; we implement FUA by
868 * performing synchronous output. */
869 if (common->cmnd[1] & ~0x18) {
870 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
873 if (common->cmnd[1] & 0x08) { /* FUA */
874 spin_lock(&curlun->filp->f_lock);
875 curlun->filp->f_flags |= O_SYNC;
876 spin_unlock(&curlun->filp->f_lock);
879 if (lba >= curlun->num_sectors) {
880 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
884 /* Carry out the file writes */
886 file_offset = usb_offset = ((loff_t) lba) << 9;
887 amount_left_to_req = common->data_size_from_cmnd;
888 amount_left_to_write = common->data_size_from_cmnd;
890 while (amount_left_to_write > 0) {
892 /* Queue a request for more data from the host */
893 bh = common->next_buffhd_to_fill;
894 if (bh->state == BUF_STATE_EMPTY && get_some_more) {
896 /* Figure out how much we want to get:
897 * Try to get the remaining amount.
898 * But don't get more than the buffer size.
899 * And don't try to go past the end of the file.
900 * If we're not at a page boundary,
901 * don't go past the next page.
902 * If this means getting 0, then we were asked
903 * to write past the end of file.
904 * Finally, round down to a block boundary. */
905 amount = min(amount_left_to_req, FSG_BUFLEN);
906 amount = min((loff_t) amount, curlun->file_length -
908 partial_page = usb_offset & (PAGE_CACHE_SIZE - 1);
909 if (partial_page > 0)
911 (unsigned int) PAGE_CACHE_SIZE - partial_page);
916 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
917 curlun->sense_data_info = usb_offset >> 9;
918 curlun->info_valid = 1;
921 amount -= (amount & 511);
924 /* Why were we were asked to transfer a
930 /* Get the next buffer */
931 usb_offset += amount;
932 common->usb_amount_left -= amount;
933 amount_left_to_req -= amount;
934 if (amount_left_to_req == 0)
937 /* amount is always divisible by 512, hence by
938 * the bulk-out maxpacket size */
939 bh->outreq->length = amount;
940 bh->bulk_out_intended_length = amount;
941 bh->outreq->short_not_ok = 1;
942 START_TRANSFER_OR(common, bulk_out, bh->outreq,
943 &bh->outreq_busy, &bh->state)
944 /* Don't know what to do if
945 * common->fsg is NULL */
947 common->next_buffhd_to_fill = bh->next;
951 /* Write the received data to the backing file */
952 bh = common->next_buffhd_to_drain;
953 if (bh->state == BUF_STATE_EMPTY && !get_some_more)
954 break; /* We stopped early */
955 if (bh->state == BUF_STATE_FULL) {
957 common->next_buffhd_to_drain = bh->next;
958 bh->state = BUF_STATE_EMPTY;
960 /* Did something go wrong with the transfer? */
961 if (bh->outreq->status != 0) {
962 curlun->sense_data = SS_COMMUNICATION_FAILURE;
963 curlun->sense_data_info = file_offset >> 9;
964 curlun->info_valid = 1;
968 amount = bh->outreq->actual;
969 if (curlun->file_length - file_offset < amount) {
971 "write %u @ %llu beyond end %llu\n",
972 amount, (unsigned long long) file_offset,
973 (unsigned long long) curlun->file_length);
974 amount = curlun->file_length - file_offset;
977 /* Perform the write */
978 file_offset_tmp = file_offset;
979 nwritten = vfs_write(curlun->filp,
980 (char __user *) bh->buf,
981 amount, &file_offset_tmp);
982 VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
983 (unsigned long long) file_offset,
985 if (signal_pending(current))
986 return -EINTR; /* Interrupted! */
989 LDBG(curlun, "error in file write: %d\n",
992 } else if (nwritten < amount) {
993 LDBG(curlun, "partial file write: %d/%u\n",
994 (int) nwritten, amount);
995 nwritten -= (nwritten & 511);
996 /* Round down to a block */
998 file_offset += nwritten;
999 amount_left_to_write -= nwritten;
1000 common->residue -= nwritten;
1002 /* If an error occurred, report it and its position */
1003 if (nwritten < amount) {
1004 curlun->sense_data = SS_WRITE_ERROR;
1005 curlun->sense_data_info = file_offset >> 9;
1006 curlun->info_valid = 1;
1010 /* Did the host decide to stop early? */
1011 if (bh->outreq->actual != bh->outreq->length) {
1012 common->short_packet_received = 1;
1018 /* Wait for something to happen */
1019 rc = sleep_thread(common);
1024 return -EIO; /* No default reply */
1028 /*-------------------------------------------------------------------------*/
1030 static int do_synchronize_cache(struct fsg_common *common)
1032 struct fsg_lun *curlun = common->curlun;
1035 /* We ignore the requested LBA and write out all file's
1036 * dirty data buffers. */
1037 rc = fsg_lun_fsync_sub(curlun);
1039 curlun->sense_data = SS_WRITE_ERROR;
1044 /*-------------------------------------------------------------------------*/
1046 static void invalidate_sub(struct fsg_lun *curlun)
1048 struct file *filp = curlun->filp;
1049 struct inode *inode = filp->f_path.dentry->d_inode;
1052 rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
1053 VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
1056 static int do_verify(struct fsg_common *common)
1058 struct fsg_lun *curlun = common->curlun;
1060 u32 verification_length;
1061 struct fsg_buffhd *bh = common->next_buffhd_to_fill;
1062 loff_t file_offset, file_offset_tmp;
1064 unsigned int amount;
1067 /* Get the starting Logical Block Address and check that it's
1069 lba = get_unaligned_be32(&common->cmnd[2]);
1070 if (lba >= curlun->num_sectors) {
1071 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1075 /* We allow DPO (Disable Page Out = don't save data in the
1076 * cache) but we don't implement it. */
1077 if (common->cmnd[1] & ~0x10) {
1078 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1082 verification_length = get_unaligned_be16(&common->cmnd[7]);
1083 if (unlikely(verification_length == 0))
1084 return -EIO; /* No default reply */
1086 /* Prepare to carry out the file verify */
1087 amount_left = verification_length << 9;
1088 file_offset = ((loff_t) lba) << 9;
1090 /* Write out all the dirty buffers before invalidating them */
1091 fsg_lun_fsync_sub(curlun);
1092 if (signal_pending(current))
1095 invalidate_sub(curlun);
1096 if (signal_pending(current))
1099 /* Just try to read the requested blocks */
1100 while (amount_left > 0) {
1102 /* Figure out how much we need to read:
1103 * Try to read the remaining amount, but not more than
1105 * And don't try to read past the end of the file.
1106 * If this means reading 0 then we were asked to read
1107 * past the end of file. */
1108 amount = min(amount_left, FSG_BUFLEN);
1109 amount = min((loff_t) amount,
1110 curlun->file_length - file_offset);
1112 curlun->sense_data =
1113 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1114 curlun->sense_data_info = file_offset >> 9;
1115 curlun->info_valid = 1;
1119 /* Perform the read */
1120 file_offset_tmp = file_offset;
1121 nread = vfs_read(curlun->filp,
1122 (char __user *) bh->buf,
1123 amount, &file_offset_tmp);
1124 VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
1125 (unsigned long long) file_offset,
1127 if (signal_pending(current))
1131 LDBG(curlun, "error in file verify: %d\n",
1134 } else if (nread < amount) {
1135 LDBG(curlun, "partial file verify: %d/%u\n",
1136 (int) nread, amount);
1137 nread -= (nread & 511); /* Round down to a sector */
1140 curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
1141 curlun->sense_data_info = file_offset >> 9;
1142 curlun->info_valid = 1;
1145 file_offset += nread;
1146 amount_left -= nread;
1152 /*-------------------------------------------------------------------------*/
1154 static int do_inquiry(struct fsg_common *common, struct fsg_buffhd *bh)
1156 struct fsg_lun *curlun = common->curlun;
1157 u8 *buf = (u8 *) bh->buf;
1159 if (!curlun) { /* Unsupported LUNs are okay */
1160 common->bad_lun_okay = 1;
1162 buf[0] = 0x7f; /* Unsupported, no device-type */
1163 buf[4] = 31; /* Additional length */
1167 buf[0] = curlun->cdrom ? TYPE_CDROM : TYPE_DISK;
1168 buf[1] = curlun->removable ? 0x80 : 0;
1169 buf[2] = 2; /* ANSI SCSI level 2 */
1170 buf[3] = 2; /* SCSI-2 INQUIRY data format */
1171 buf[4] = 31; /* Additional length */
1172 buf[5] = 0; /* No special options */
1175 memcpy(buf + 8, common->inquiry_string, sizeof common->inquiry_string);
1180 static int do_request_sense(struct fsg_common *common, struct fsg_buffhd *bh)
1182 struct fsg_lun *curlun = common->curlun;
1183 u8 *buf = (u8 *) bh->buf;
1188 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
1190 * If a REQUEST SENSE command is received from an initiator
1191 * with a pending unit attention condition (before the target
1192 * generates the contingent allegiance condition), then the
1193 * target shall either:
1194 * a) report any pending sense data and preserve the unit
1195 * attention condition on the logical unit, or,
1196 * b) report the unit attention condition, may discard any
1197 * pending sense data, and clear the unit attention
1198 * condition on the logical unit for that initiator.
1200 * FSG normally uses option a); enable this code to use option b).
1203 if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
1204 curlun->sense_data = curlun->unit_attention_data;
1205 curlun->unit_attention_data = SS_NO_SENSE;
1209 if (!curlun) { /* Unsupported LUNs are okay */
1210 common->bad_lun_okay = 1;
1211 sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
1215 sd = curlun->sense_data;
1216 sdinfo = curlun->sense_data_info;
1217 valid = curlun->info_valid << 7;
1218 curlun->sense_data = SS_NO_SENSE;
1219 curlun->sense_data_info = 0;
1220 curlun->info_valid = 0;
1224 buf[0] = valid | 0x70; /* Valid, current error */
1226 put_unaligned_be32(sdinfo, &buf[3]); /* Sense information */
1227 buf[7] = 18 - 8; /* Additional sense length */
1234 static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh)
1236 struct fsg_lun *curlun = common->curlun;
1237 u32 lba = get_unaligned_be32(&common->cmnd[2]);
1238 int pmi = common->cmnd[8];
1239 u8 *buf = (u8 *) bh->buf;
1241 /* Check the PMI and LBA fields */
1242 if (pmi > 1 || (pmi == 0 && lba != 0)) {
1243 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1247 put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
1248 /* Max logical block */
1249 put_unaligned_be32(512, &buf[4]); /* Block length */
1254 static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh)
1256 struct fsg_lun *curlun = common->curlun;
1257 int msf = common->cmnd[1] & 0x02;
1258 u32 lba = get_unaligned_be32(&common->cmnd[2]);
1259 u8 *buf = (u8 *) bh->buf;
1261 if (common->cmnd[1] & ~0x02) { /* Mask away MSF */
1262 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1265 if (lba >= curlun->num_sectors) {
1266 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1271 buf[0] = 0x01; /* 2048 bytes of user data, rest is EC */
1272 store_cdrom_address(&buf[4], msf, lba);
1277 static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
1279 struct fsg_lun *curlun = common->curlun;
1280 int msf = common->cmnd[1] & 0x02;
1281 int start_track = common->cmnd[6];
1282 u8 *buf = (u8 *) bh->buf;
1284 if ((common->cmnd[1] & ~0x02) != 0 || /* Mask away MSF */
1286 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1291 buf[1] = (20-2); /* TOC data length */
1292 buf[2] = 1; /* First track number */
1293 buf[3] = 1; /* Last track number */
1294 buf[5] = 0x16; /* Data track, copying allowed */
1295 buf[6] = 0x01; /* Only track is number 1 */
1296 store_cdrom_address(&buf[8], msf, 0);
1298 buf[13] = 0x16; /* Lead-out track is data */
1299 buf[14] = 0xAA; /* Lead-out track number */
1300 store_cdrom_address(&buf[16], msf, curlun->num_sectors);
1305 static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
1307 struct fsg_lun *curlun = common->curlun;
1308 int mscmnd = common->cmnd[0];
1309 u8 *buf = (u8 *) bh->buf;
1312 int changeable_values, all_pages;
1316 if ((common->cmnd[1] & ~0x08) != 0) { /* Mask away DBD */
1317 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1320 pc = common->cmnd[2] >> 6;
1321 page_code = common->cmnd[2] & 0x3f;
1323 curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
1326 changeable_values = (pc == 1);
1327 all_pages = (page_code == 0x3f);
1329 /* Write the mode parameter header. Fixed values are: default
1330 * medium type, no cache control (DPOFUA), and no block descriptors.
1331 * The only variable value is the WriteProtect bit. We will fill in
1332 * the mode data length later. */
1334 if (mscmnd == SC_MODE_SENSE_6) {
1335 buf[2] = (curlun->ro ? 0x80 : 0x00); /* WP, DPOFUA */
1338 } else { /* SC_MODE_SENSE_10 */
1339 buf[3] = (curlun->ro ? 0x80 : 0x00); /* WP, DPOFUA */
1341 limit = 65535; /* Should really be FSG_BUFLEN */
1344 /* No block descriptors */
1346 /* The mode pages, in numerical order. The only page we support
1347 * is the Caching page. */
1348 if (page_code == 0x08 || all_pages) {
1350 buf[0] = 0x08; /* Page code */
1351 buf[1] = 10; /* Page length */
1352 memset(buf+2, 0, 10); /* None of the fields are changeable */
1354 if (!changeable_values) {
1355 buf[2] = 0x04; /* Write cache enable, */
1356 /* Read cache not disabled */
1357 /* No cache retention priorities */
1358 put_unaligned_be16(0xffff, &buf[4]);
1359 /* Don't disable prefetch */
1360 /* Minimum prefetch = 0 */
1361 put_unaligned_be16(0xffff, &buf[8]);
1362 /* Maximum prefetch */
1363 put_unaligned_be16(0xffff, &buf[10]);
1364 /* Maximum prefetch ceiling */
1369 /* Check that a valid page was requested and the mode data length
1370 * isn't too long. */
1372 if (!valid_page || len > limit) {
1373 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1377 /* Store the mode data length */
1378 if (mscmnd == SC_MODE_SENSE_6)
1381 put_unaligned_be16(len - 2, buf0);
1386 static int do_start_stop(struct fsg_common *common)
1388 if (!common->curlun) {
1390 } else if (!common->curlun->removable) {
1391 common->curlun->sense_data = SS_INVALID_COMMAND;
1398 static int do_prevent_allow(struct fsg_common *common)
1400 struct fsg_lun *curlun = common->curlun;
1403 if (!common->curlun) {
1405 } else if (!common->curlun->removable) {
1406 common->curlun->sense_data = SS_INVALID_COMMAND;
1410 prevent = common->cmnd[4] & 0x01;
1411 if ((common->cmnd[4] & ~0x01) != 0) { /* Mask away Prevent */
1412 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1416 if (curlun->prevent_medium_removal && !prevent)
1417 fsg_lun_fsync_sub(curlun);
1418 curlun->prevent_medium_removal = prevent;
1423 static int do_read_format_capacities(struct fsg_common *common,
1424 struct fsg_buffhd *bh)
1426 struct fsg_lun *curlun = common->curlun;
1427 u8 *buf = (u8 *) bh->buf;
1429 buf[0] = buf[1] = buf[2] = 0;
1430 buf[3] = 8; /* Only the Current/Maximum Capacity Descriptor */
1433 put_unaligned_be32(curlun->num_sectors, &buf[0]);
1434 /* Number of blocks */
1435 put_unaligned_be32(512, &buf[4]); /* Block length */
1436 buf[4] = 0x02; /* Current capacity */
1441 static int do_mode_select(struct fsg_common *common, struct fsg_buffhd *bh)
1443 struct fsg_lun *curlun = common->curlun;
1445 /* We don't support MODE SELECT */
1447 curlun->sense_data = SS_INVALID_COMMAND;
1452 /*-------------------------------------------------------------------------*/
1454 static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
1458 rc = fsg_set_halt(fsg, fsg->bulk_in);
1460 VDBG(fsg, "delayed bulk-in endpoint halt\n");
1462 if (rc != -EAGAIN) {
1463 WARNING(fsg, "usb_ep_set_halt -> %d\n", rc);
1468 /* Wait for a short time and then try again */
1469 if (msleep_interruptible(100) != 0)
1471 rc = usb_ep_set_halt(fsg->bulk_in);
1476 static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
1480 DBG(fsg, "bulk-in set wedge\n");
1481 rc = usb_ep_set_wedge(fsg->bulk_in);
1483 VDBG(fsg, "delayed bulk-in endpoint wedge\n");
1485 if (rc != -EAGAIN) {
1486 WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc);
1491 /* Wait for a short time and then try again */
1492 if (msleep_interruptible(100) != 0)
1494 rc = usb_ep_set_wedge(fsg->bulk_in);
1499 static int pad_with_zeros(struct fsg_dev *fsg)
1501 struct fsg_buffhd *bh = fsg->common->next_buffhd_to_fill;
1502 u32 nkeep = bh->inreq->length;
1506 bh->state = BUF_STATE_EMPTY; /* For the first iteration */
1507 fsg->common->usb_amount_left = nkeep + fsg->common->residue;
1508 while (fsg->common->usb_amount_left > 0) {
1510 /* Wait for the next buffer to be free */
1511 while (bh->state != BUF_STATE_EMPTY) {
1512 rc = sleep_thread(fsg->common);
1517 nsend = min(fsg->common->usb_amount_left, FSG_BUFLEN);
1518 memset(bh->buf + nkeep, 0, nsend - nkeep);
1519 bh->inreq->length = nsend;
1520 bh->inreq->zero = 0;
1521 start_transfer(fsg, fsg->bulk_in, bh->inreq,
1522 &bh->inreq_busy, &bh->state);
1523 bh = fsg->common->next_buffhd_to_fill = bh->next;
1524 fsg->common->usb_amount_left -= nsend;
1530 static int throw_away_data(struct fsg_common *common)
1532 struct fsg_buffhd *bh;
1536 for (bh = common->next_buffhd_to_drain;
1537 bh->state != BUF_STATE_EMPTY || common->usb_amount_left > 0;
1538 bh = common->next_buffhd_to_drain) {
1540 /* Throw away the data in a filled buffer */
1541 if (bh->state == BUF_STATE_FULL) {
1543 bh->state = BUF_STATE_EMPTY;
1544 common->next_buffhd_to_drain = bh->next;
1546 /* A short packet or an error ends everything */
1547 if (bh->outreq->actual != bh->outreq->length ||
1548 bh->outreq->status != 0) {
1549 raise_exception(common,
1550 FSG_STATE_ABORT_BULK_OUT);
1556 /* Try to submit another request if we need one */
1557 bh = common->next_buffhd_to_fill;
1558 if (bh->state == BUF_STATE_EMPTY
1559 && common->usb_amount_left > 0) {
1560 amount = min(common->usb_amount_left, FSG_BUFLEN);
1562 /* amount is always divisible by 512, hence by
1563 * the bulk-out maxpacket size */
1564 bh->outreq->length = amount;
1565 bh->bulk_out_intended_length = amount;
1566 bh->outreq->short_not_ok = 1;
1567 START_TRANSFER_OR(common, bulk_out, bh->outreq,
1568 &bh->outreq_busy, &bh->state)
1569 /* Don't know what to do if
1570 * common->fsg is NULL */
1572 common->next_buffhd_to_fill = bh->next;
1573 common->usb_amount_left -= amount;
1577 /* Otherwise wait for something to happen */
1578 rc = sleep_thread(common);
1586 static int finish_reply(struct fsg_common *common)
1588 struct fsg_buffhd *bh = common->next_buffhd_to_fill;
1591 switch (common->data_dir) {
1593 break; /* Nothing to send */
1595 /* If we don't know whether the host wants to read or write,
1596 * this must be CB or CBI with an unknown command. We mustn't
1597 * try to send or receive any data. So stall both bulk pipes
1598 * if we can and wait for a reset. */
1599 case DATA_DIR_UNKNOWN:
1600 if (!common->can_stall) {
1602 } else if (fsg_is_set(common)) {
1603 fsg_set_halt(common->fsg, common->fsg->bulk_out);
1604 rc = halt_bulk_in_endpoint(common->fsg);
1606 /* Don't know what to do if common->fsg is NULL */
1611 /* All but the last buffer of data must have already been sent */
1612 case DATA_DIR_TO_HOST:
1613 if (common->data_size == 0) {
1614 /* Nothing to send */
1616 /* If there's no residue, simply send the last buffer */
1617 } else if (common->residue == 0) {
1618 bh->inreq->zero = 0;
1619 START_TRANSFER_OR(common, bulk_in, bh->inreq,
1620 &bh->inreq_busy, &bh->state)
1622 common->next_buffhd_to_fill = bh->next;
1624 /* For Bulk-only, if we're allowed to stall then send the
1625 * short packet and halt the bulk-in endpoint. If we can't
1626 * stall, pad out the remaining data with 0's. */
1627 } else if (common->can_stall) {
1628 bh->inreq->zero = 1;
1629 START_TRANSFER_OR(common, bulk_in, bh->inreq,
1630 &bh->inreq_busy, &bh->state)
1631 /* Don't know what to do if
1632 * common->fsg is NULL */
1634 common->next_buffhd_to_fill = bh->next;
1636 rc = halt_bulk_in_endpoint(common->fsg);
1637 } else if (fsg_is_set(common)) {
1638 rc = pad_with_zeros(common->fsg);
1640 /* Don't know what to do if common->fsg is NULL */
1645 /* We have processed all we want from the data the host has sent.
1646 * There may still be outstanding bulk-out requests. */
1647 case DATA_DIR_FROM_HOST:
1648 if (common->residue == 0) {
1649 /* Nothing to receive */
1651 /* Did the host stop sending unexpectedly early? */
1652 } else if (common->short_packet_received) {
1653 raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
1656 /* We haven't processed all the incoming data. Even though
1657 * we may be allowed to stall, doing so would cause a race.
1658 * The controller may already have ACK'ed all the remaining
1659 * bulk-out packets, in which case the host wouldn't see a
1660 * STALL. Not realizing the endpoint was halted, it wouldn't
1661 * clear the halt -- leading to problems later on. */
1663 } else if (common->can_stall) {
1664 if (fsg_is_set(common))
1665 fsg_set_halt(common->fsg,
1666 common->fsg->bulk_out);
1667 raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
1671 /* We can't stall. Read in the excess data and throw it
1674 rc = throw_away_data(common);
1682 static int send_status(struct fsg_common *common)
1684 struct fsg_lun *curlun = common->curlun;
1685 struct fsg_buffhd *bh;
1686 struct bulk_cs_wrap *csw;
1688 u8 status = USB_STATUS_PASS;
1691 /* Wait for the next buffer to become available */
1692 bh = common->next_buffhd_to_fill;
1693 while (bh->state != BUF_STATE_EMPTY) {
1694 rc = sleep_thread(common);
1700 sd = curlun->sense_data;
1701 sdinfo = curlun->sense_data_info;
1702 } else if (common->bad_lun_okay)
1705 sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
1707 if (common->phase_error) {
1708 DBG(common, "sending phase-error status\n");
1709 status = USB_STATUS_PHASE_ERROR;
1710 sd = SS_INVALID_COMMAND;
1711 } else if (sd != SS_NO_SENSE) {
1712 DBG(common, "sending command-failure status\n");
1713 status = USB_STATUS_FAIL;
1714 VDBG(common, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
1716 SK(sd), ASC(sd), ASCQ(sd), sdinfo);
1719 /* Store and send the Bulk-only CSW */
1720 csw = (void *)bh->buf;
1722 csw->Signature = cpu_to_le32(USB_BULK_CS_SIG);
1723 csw->Tag = common->tag;
1724 csw->Residue = cpu_to_le32(common->residue);
1725 csw->Status = status;
1727 bh->inreq->length = USB_BULK_CS_WRAP_LEN;
1728 bh->inreq->zero = 0;
1729 START_TRANSFER_OR(common, bulk_in, bh->inreq,
1730 &bh->inreq_busy, &bh->state)
1731 /* Don't know what to do if common->fsg is NULL */
1734 common->next_buffhd_to_fill = bh->next;
1739 /*-------------------------------------------------------------------------*/
1741 /* Check whether the command is properly formed and whether its data size
1742 * and direction agree with the values we already have. */
1743 static int check_command(struct fsg_common *common, int cmnd_size,
1744 enum data_direction data_dir, unsigned int mask,
1745 int needs_medium, const char *name)
1748 int lun = common->cmnd[1] >> 5;
1749 static const char dirletter[4] = {'u', 'o', 'i', 'n'};
1751 struct fsg_lun *curlun;
1754 if (common->data_dir != DATA_DIR_UNKNOWN)
1755 sprintf(hdlen, ", H%c=%u", dirletter[(int) common->data_dir],
1757 VDBG(common, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n",
1758 name, cmnd_size, dirletter[(int) data_dir],
1759 common->data_size_from_cmnd, common->cmnd_size, hdlen);
1761 /* We can't reply at all until we know the correct data direction
1763 if (common->data_size_from_cmnd == 0)
1764 data_dir = DATA_DIR_NONE;
1765 if (common->data_size < common->data_size_from_cmnd) {
1766 /* Host data size < Device data size is a phase error.
1767 * Carry out the command, but only transfer as much as
1768 * we are allowed. */
1769 common->data_size_from_cmnd = common->data_size;
1770 common->phase_error = 1;
1772 common->residue = common->data_size;
1773 common->usb_amount_left = common->data_size;
1775 /* Conflicting data directions is a phase error */
1776 if (common->data_dir != data_dir
1777 && common->data_size_from_cmnd > 0) {
1778 common->phase_error = 1;
1782 /* Verify the length of the command itself */
1783 if (cmnd_size != common->cmnd_size) {
1785 /* Special case workaround: There are plenty of buggy SCSI
1786 * implementations. Many have issues with cbw->Length
1787 * field passing a wrong command size. For those cases we
1788 * always try to work around the problem by using the length
1789 * sent by the host side provided it is at least as large
1790 * as the correct command length.
1791 * Examples of such cases would be MS-Windows, which issues
1792 * REQUEST SENSE with cbw->Length == 12 where it should
1793 * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and
1794 * REQUEST SENSE with cbw->Length == 10 where it should
1797 if (cmnd_size <= common->cmnd_size) {
1798 DBG(common, "%s is buggy! Expected length %d "
1799 "but we got %d\n", name,
1800 cmnd_size, common->cmnd_size);
1801 cmnd_size = common->cmnd_size;
1803 common->phase_error = 1;
1808 /* Check that the LUN values are consistent */
1809 if (common->lun != lun)
1810 DBG(common, "using LUN %d from CBW, not LUN %d from CDB\n",
1814 if (common->lun >= 0 && common->lun < common->nluns) {
1815 curlun = &common->luns[common->lun];
1816 common->curlun = curlun;
1817 if (common->cmnd[0] != SC_REQUEST_SENSE) {
1818 curlun->sense_data = SS_NO_SENSE;
1819 curlun->sense_data_info = 0;
1820 curlun->info_valid = 0;
1823 common->curlun = NULL;
1825 common->bad_lun_okay = 0;
1827 /* INQUIRY and REQUEST SENSE commands are explicitly allowed
1828 * to use unsupported LUNs; all others may not. */
1829 if (common->cmnd[0] != SC_INQUIRY &&
1830 common->cmnd[0] != SC_REQUEST_SENSE) {
1831 DBG(common, "unsupported LUN %d\n", common->lun);
1836 /* If a unit attention condition exists, only INQUIRY and
1837 * REQUEST SENSE commands are allowed; anything else must fail. */
1838 if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
1839 common->cmnd[0] != SC_INQUIRY &&
1840 common->cmnd[0] != SC_REQUEST_SENSE) {
1841 curlun->sense_data = curlun->unit_attention_data;
1842 curlun->unit_attention_data = SS_NO_SENSE;
1846 /* Check that only command bytes listed in the mask are non-zero */
1847 common->cmnd[1] &= 0x1f; /* Mask away the LUN */
1848 for (i = 1; i < cmnd_size; ++i) {
1849 if (common->cmnd[i] && !(mask & (1 << i))) {
1851 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1856 /* If the medium isn't mounted and the command needs to access
1857 * it, return an error. */
1858 if (curlun && !fsg_lun_is_open(curlun) && needs_medium) {
1859 curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
1867 static int do_scsi_command(struct fsg_common *common)
1869 struct fsg_buffhd *bh;
1871 int reply = -EINVAL;
1873 static char unknown[16];
1877 /* Wait for the next buffer to become available for data or status */
1878 bh = common->next_buffhd_to_fill;
1879 common->next_buffhd_to_drain = bh;
1880 while (bh->state != BUF_STATE_EMPTY) {
1881 rc = sleep_thread(common);
1885 common->phase_error = 0;
1886 common->short_packet_received = 0;
1888 down_read(&common->filesem); /* We're using the backing file */
1889 switch (common->cmnd[0]) {
1892 common->data_size_from_cmnd = common->cmnd[4];
1893 reply = check_command(common, 6, DATA_DIR_TO_HOST,
1897 reply = do_inquiry(common, bh);
1900 case SC_MODE_SELECT_6:
1901 common->data_size_from_cmnd = common->cmnd[4];
1902 reply = check_command(common, 6, DATA_DIR_FROM_HOST,
1906 reply = do_mode_select(common, bh);
1909 case SC_MODE_SELECT_10:
1910 common->data_size_from_cmnd =
1911 get_unaligned_be16(&common->cmnd[7]);
1912 reply = check_command(common, 10, DATA_DIR_FROM_HOST,
1916 reply = do_mode_select(common, bh);
1919 case SC_MODE_SENSE_6:
1920 common->data_size_from_cmnd = common->cmnd[4];
1921 reply = check_command(common, 6, DATA_DIR_TO_HOST,
1922 (1<<1) | (1<<2) | (1<<4), 0,
1925 reply = do_mode_sense(common, bh);
1928 case SC_MODE_SENSE_10:
1929 common->data_size_from_cmnd =
1930 get_unaligned_be16(&common->cmnd[7]);
1931 reply = check_command(common, 10, DATA_DIR_TO_HOST,
1932 (1<<1) | (1<<2) | (3<<7), 0,
1935 reply = do_mode_sense(common, bh);
1938 case SC_PREVENT_ALLOW_MEDIUM_REMOVAL:
1939 common->data_size_from_cmnd = 0;
1940 reply = check_command(common, 6, DATA_DIR_NONE,
1942 "PREVENT-ALLOW MEDIUM REMOVAL");
1944 reply = do_prevent_allow(common);
1948 i = common->cmnd[4];
1949 common->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
1950 reply = check_command(common, 6, DATA_DIR_TO_HOST,
1954 reply = do_read(common);
1958 common->data_size_from_cmnd =
1959 get_unaligned_be16(&common->cmnd[7]) << 9;
1960 reply = check_command(common, 10, DATA_DIR_TO_HOST,
1961 (1<<1) | (0xf<<2) | (3<<7), 1,
1964 reply = do_read(common);
1968 common->data_size_from_cmnd =
1969 get_unaligned_be32(&common->cmnd[6]) << 9;
1970 reply = check_command(common, 12, DATA_DIR_TO_HOST,
1971 (1<<1) | (0xf<<2) | (0xf<<6), 1,
1974 reply = do_read(common);
1977 case SC_READ_CAPACITY:
1978 common->data_size_from_cmnd = 8;
1979 reply = check_command(common, 10, DATA_DIR_TO_HOST,
1980 (0xf<<2) | (1<<8), 1,
1983 reply = do_read_capacity(common, bh);
1986 case SC_READ_HEADER:
1987 if (!common->curlun || !common->curlun->cdrom)
1989 common->data_size_from_cmnd =
1990 get_unaligned_be16(&common->cmnd[7]);
1991 reply = check_command(common, 10, DATA_DIR_TO_HOST,
1992 (3<<7) | (0x1f<<1), 1,
1995 reply = do_read_header(common, bh);
1999 if (!common->curlun || !common->curlun->cdrom)
2001 common->data_size_from_cmnd =
2002 get_unaligned_be16(&common->cmnd[7]);
2003 reply = check_command(common, 10, DATA_DIR_TO_HOST,
2007 reply = do_read_toc(common, bh);
2010 case SC_READ_FORMAT_CAPACITIES:
2011 common->data_size_from_cmnd =
2012 get_unaligned_be16(&common->cmnd[7]);
2013 reply = check_command(common, 10, DATA_DIR_TO_HOST,
2015 "READ FORMAT CAPACITIES");
2017 reply = do_read_format_capacities(common, bh);
2020 case SC_REQUEST_SENSE:
2021 common->data_size_from_cmnd = common->cmnd[4];
2022 reply = check_command(common, 6, DATA_DIR_TO_HOST,
2026 reply = do_request_sense(common, bh);
2029 case SC_START_STOP_UNIT:
2030 common->data_size_from_cmnd = 0;
2031 reply = check_command(common, 6, DATA_DIR_NONE,
2035 reply = do_start_stop(common);
2038 case SC_SYNCHRONIZE_CACHE:
2039 common->data_size_from_cmnd = 0;
2040 reply = check_command(common, 10, DATA_DIR_NONE,
2041 (0xf<<2) | (3<<7), 1,
2042 "SYNCHRONIZE CACHE");
2044 reply = do_synchronize_cache(common);
2047 case SC_TEST_UNIT_READY:
2048 common->data_size_from_cmnd = 0;
2049 reply = check_command(common, 6, DATA_DIR_NONE,
2054 /* Although optional, this command is used by MS-Windows. We
2055 * support a minimal version: BytChk must be 0. */
2057 common->data_size_from_cmnd = 0;
2058 reply = check_command(common, 10, DATA_DIR_NONE,
2059 (1<<1) | (0xf<<2) | (3<<7), 1,
2062 reply = do_verify(common);
2066 i = common->cmnd[4];
2067 common->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
2068 reply = check_command(common, 6, DATA_DIR_FROM_HOST,
2072 reply = do_write(common);
2076 common->data_size_from_cmnd =
2077 get_unaligned_be16(&common->cmnd[7]) << 9;
2078 reply = check_command(common, 10, DATA_DIR_FROM_HOST,
2079 (1<<1) | (0xf<<2) | (3<<7), 1,
2082 reply = do_write(common);
2086 common->data_size_from_cmnd =
2087 get_unaligned_be32(&common->cmnd[6]) << 9;
2088 reply = check_command(common, 12, DATA_DIR_FROM_HOST,
2089 (1<<1) | (0xf<<2) | (0xf<<6), 1,
2092 reply = do_write(common);
2095 /* Some mandatory commands that we recognize but don't implement.
2096 * They don't mean much in this setting. It's left as an exercise
2097 * for anyone interested to implement RESERVE and RELEASE in terms
2098 * of POSIX locks. */
2099 case SC_FORMAT_UNIT:
2102 case SC_SEND_DIAGNOSTIC:
2107 common->data_size_from_cmnd = 0;
2108 sprintf(unknown, "Unknown x%02x", common->cmnd[0]);
2109 reply = check_command(common, common->cmnd_size,
2110 DATA_DIR_UNKNOWN, 0xff, 0, unknown);
2112 common->curlun->sense_data = SS_INVALID_COMMAND;
2117 up_read(&common->filesem);
2119 if (reply == -EINTR || signal_pending(current))
2122 /* Set up the single reply buffer for finish_reply() */
2123 if (reply == -EINVAL)
2124 reply = 0; /* Error reply length */
2125 if (reply >= 0 && common->data_dir == DATA_DIR_TO_HOST) {
2126 reply = min((u32) reply, common->data_size_from_cmnd);
2127 bh->inreq->length = reply;
2128 bh->state = BUF_STATE_FULL;
2129 common->residue -= reply;
2130 } /* Otherwise it's already set */
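	/*
	 * Worked example: for an INQUIRY whose allocation length (byte 4)
	 * was 36, data_size_from_cmnd is 36 and do_inquiry() returns the
	 * number of bytes it actually prepared.  The clamp above keeps
	 * reply within 36, bh->inreq->length tells the UDC how much to
	 * send, and residue shrinks by the same amount so that the CSW
	 * sent later reports how much of the host's expected transfer was
	 * left unsatisfied.
	 */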
2136 /*-------------------------------------------------------------------------*/
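/*
 * For reference, the Command Block Wrapper validated below is the 31-byte
 * packet defined by the USB Mass Storage Class Bulk-Only Transport spec:
 *
 *	bytes  0-3	dCBWSignature		0x43425355 ("USBC")
 *	bytes  4-7	dCBWTag			echoed back in the CSW
 *	bytes  8-11	dCBWDataTransferLength	expected data-phase length
 *	byte  12	bmCBWFlags		bit 7 set = device-to-host
 *	byte  13	bCBWLUN			target logical unit
 *	byte  14	bCBWCBLength		number of valid CDB bytes (1-16)
 *	bytes 15-30	CBWCB			the SCSI command block itself
 */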
2138 static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2140 struct usb_request *req = bh->outreq;
2141 struct fsg_bulk_cb_wrap *cbw = req->buf;
2142 struct fsg_common *common = fsg->common;
2144 /* Was this a real packet? Should it be ignored? */
2145 if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
2148 /* Is the CBW valid? */
2149 if (req->actual != USB_BULK_CB_WRAP_LEN ||
2150 cbw->Signature != cpu_to_le32(
2152 DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
2154 le32_to_cpu(cbw->Signature));
2156 /* The Bulk-only spec says we MUST stall the IN endpoint
2157 * (6.6.1), so it's unavoidable. It also says we must
2158 * retain this state until the next reset, but there's
2159 * no way to tell the controller driver it should ignore
2160 * Clear-Feature(HALT) requests.
2162 * We aren't required to halt the OUT endpoint; instead
2163 * we can simply accept and discard any data received
2164 * until the next reset. */
2165 wedge_bulk_in_endpoint(fsg);
2166 set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
2170 /* Is the CBW meaningful? */
2171 if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
2172 cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
2173 DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
2175 cbw->Lun, cbw->Flags, cbw->Length);
2177 /* We can do anything we want here, so let's stall the
2178 * bulk pipes if we are allowed to. */
2179 if (common->can_stall) {
2180 fsg_set_halt(fsg, fsg->bulk_out);
2181 halt_bulk_in_endpoint(fsg);
2186 /* Save the command for later */
2187 common->cmnd_size = cbw->Length;
2188 memcpy(common->cmnd, cbw->CDB, common->cmnd_size);
2189 if (cbw->Flags & USB_BULK_IN_FLAG)
2190 common->data_dir = DATA_DIR_TO_HOST;
2192 common->data_dir = DATA_DIR_FROM_HOST;
2193 common->data_size = le32_to_cpu(cbw->DataTransferLength);
2194 if (common->data_size == 0)
2195 common->data_dir = DATA_DIR_NONE;
2196 common->lun = cbw->Lun;
2197 common->tag = cbw->Tag;
2202 static int get_next_command(struct fsg_common *common)
2204 struct fsg_buffhd *bh;
2207 /* Wait for the next buffer to become available */
2208 bh = common->next_buffhd_to_fill;
2209 while (bh->state != BUF_STATE_EMPTY) {
2210 rc = sleep_thread(common);
2215 /* Queue a request to read a Bulk-only CBW */
2216 set_bulk_out_req_length(common, bh, USB_BULK_CB_WRAP_LEN);
2217 bh->outreq->short_not_ok = 1;
2218 START_TRANSFER_OR(common, bulk_out, bh->outreq,
2219 &bh->outreq_busy, &bh->state)
2220 /* Don't know what to do if common->fsg is NULL */
2223 /* We will drain the buffer in software, which means we
2224 * can reuse it for the next filling. No need to advance
2225 * next_buffhd_to_fill. */
2227 /* Wait for the CBW to arrive */
2228 while (bh->state != BUF_STATE_FULL) {
2229 rc = sleep_thread(common);
2234 rc = fsg_is_set(common) ? received_cbw(common->fsg, bh) : -EIO;
2235 bh->state = BUF_STATE_EMPTY;
2241 /*-------------------------------------------------------------------------*/
2243 static int enable_endpoint(struct fsg_common *common, struct usb_ep *ep,
2244 const struct usb_endpoint_descriptor *d)
2248 ep->driver_data = common;
2249 rc = usb_ep_enable(ep, d);
2251 ERROR(common, "can't enable %s, result %d\n", ep->name, rc);
2255 static int alloc_request(struct fsg_common *common, struct usb_ep *ep,
2256 struct usb_request **preq)
2258 *preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
2261 ERROR(common, "can't allocate request for %s\n", ep->name);
2266 * Reset interface setting and re-init endpoint state (toggle etc).
2267 * Call with altsetting < 0 to disable the interface. The only other
2268 * available altsetting is 0, which enables the interface.
2270 static int do_set_interface(struct fsg_common *common, int altsetting)
2274 const struct usb_endpoint_descriptor *d;
2276 if (common->running)
2277 DBG(common, "reset interface\n");
2280 /* Deallocate the requests */
2281 if (common->prev_fsg) {
2282 struct fsg_dev *fsg = common->prev_fsg;
2284 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2285 struct fsg_buffhd *bh = &common->buffhds[i];
2288 usb_ep_free_request(fsg->bulk_in, bh->inreq);
2292 usb_ep_free_request(fsg->bulk_out, bh->outreq);
2297 /* Disable the endpoints */
2298 if (fsg->bulk_in_enabled) {
2299 usb_ep_disable(fsg->bulk_in);
2300 fsg->bulk_in_enabled = 0;
2302 if (fsg->bulk_out_enabled) {
2303 usb_ep_disable(fsg->bulk_out);
2304 fsg->bulk_out_enabled = 0;
2307 common->prev_fsg = NULL;
2310 common->running = 0;
2311 if (altsetting < 0 || rc != 0)
2314 DBG(common, "set interface %d\n", altsetting);
2316 if (fsg_is_set(common)) {
2317 struct fsg_dev *fsg = common->fsg;
2318 common->prev_fsg = common->fsg;
2320 /* Enable the endpoints */
2321 d = fsg_ep_desc(common->gadget,
2322 &fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc);
2323 rc = enable_endpoint(common, fsg->bulk_in, d);
2326 fsg->bulk_in_enabled = 1;
2328 d = fsg_ep_desc(common->gadget,
2329 &fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc);
2330 rc = enable_endpoint(common, fsg->bulk_out, d);
2333 fsg->bulk_out_enabled = 1;
2334 common->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize);
2335 clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
2337 /* Allocate the requests */
2338 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2339 struct fsg_buffhd *bh = &common->buffhds[i];
2341 rc = alloc_request(common, fsg->bulk_in, &bh->inreq);
2344 rc = alloc_request(common, fsg->bulk_out, &bh->outreq);
2347 bh->inreq->buf = bh->outreq->buf = bh->buf;
2348 bh->inreq->context = bh->outreq->context = bh;
2349 bh->inreq->complete = bulk_in_complete;
2350 bh->outreq->complete = bulk_out_complete;
2353 common->running = 1;
2354 for (i = 0; i < common->nluns; ++i)
2355 common->luns[i].unit_attention_data = SS_RESET_OCCURRED;
2364 * Change our operational configuration. This code must agree with the code
2365 * that returns config descriptors, and with interface altsetting code.
2367 * It's also responsible for power management interactions. Some
2368 * configurations might not work with our current power sources.
2369 * For now we just assume the gadget is always self-powered.
2371 static int do_set_config(struct fsg_common *common, u8 new_config)
2375 /* Disable the single interface */
2376 if (common->config != 0) {
2377 DBG(common, "reset config\n");
2379 rc = do_set_interface(common, -1);
2382 /* Enable the interface */
2383 if (new_config != 0) {
2384 common->config = new_config;
2385 rc = do_set_interface(common, 0);
2387 common->config = 0; /* Reset on errors */
2393 /****************************** ALT CONFIGS ******************************/
2396 static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
2398 struct fsg_dev *fsg = fsg_from_func(f);
2399 fsg->common->prev_fsg = fsg->common->fsg;
2400 fsg->common->fsg = fsg;
2401 fsg->common->new_config = 1;
2402 raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
2406 static void fsg_disable(struct usb_function *f)
2408 struct fsg_dev *fsg = fsg_from_func(f);
2409 fsg->common->prev_fsg = fsg->common->fsg;
2410 fsg->common->fsg = fsg;
2411 fsg->common->new_config = 0;
2412 raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
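/*
 * Note that fsg_set_alt() and fsg_disable() do no real work themselves:
 * they typically run in the UDC driver's interrupt context, where we must
 * not sleep, so they only record the new state and raise
 * FSG_STATE_CONFIG_CHANGE.  The actual endpoint and request reshuffling is
 * done by the worker thread when handle_exception() calls do_set_config().
 */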
2416 /*-------------------------------------------------------------------------*/
2418 static void handle_exception(struct fsg_common *common)
2423 struct fsg_buffhd *bh;
2424 enum fsg_state old_state;
2426 struct fsg_lun *curlun;
2427 unsigned int exception_req_tag;
2430 /* Clear the existing signals. Anything but SIGUSR1 is converted
2431 * into a high-priority EXIT exception. */
2433 sig = dequeue_signal_lock(current, &current->blocked, &info);
2436 if (sig != SIGUSR1) {
2437 if (common->state < FSG_STATE_EXIT)
2438 DBG(common, "Main thread exiting on signal\n");
2439 raise_exception(common, FSG_STATE_EXIT);
2443 /* Cancel all the pending transfers */
2444 if (fsg_is_set(common)) {
2445 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2446 bh = &common->buffhds[i];
2448 usb_ep_dequeue(common->fsg->bulk_in, bh->inreq);
2449 if (bh->outreq_busy)
2450 usb_ep_dequeue(common->fsg->bulk_out,
2454 /* Wait until everything is idle */
2457 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2458 bh = &common->buffhds[i];
2459 num_active += bh->inreq_busy + bh->outreq_busy;
2461 if (num_active == 0)
2463 if (sleep_thread(common))
2467 /* Clear out the controller's fifos */
2468 if (common->fsg->bulk_in_enabled)
2469 usb_ep_fifo_flush(common->fsg->bulk_in);
2470 if (common->fsg->bulk_out_enabled)
2471 usb_ep_fifo_flush(common->fsg->bulk_out);
2474 /* Reset the I/O buffer states and pointers, the SCSI
2475 * state, and the exception. Then invoke the handler. */
2476 spin_lock_irq(&common->lock);
2478 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2479 bh = &common->buffhds[i];
2480 bh->state = BUF_STATE_EMPTY;
2482 common->next_buffhd_to_fill = &common->buffhds[0];
2483 common->next_buffhd_to_drain = &common->buffhds[0];
2484 exception_req_tag = common->exception_req_tag;
2485 new_config = common->new_config;
2486 old_state = common->state;
2488 if (old_state == FSG_STATE_ABORT_BULK_OUT)
2489 common->state = FSG_STATE_STATUS_PHASE;
2491 for (i = 0; i < common->nluns; ++i) {
2492 curlun = &common->luns[i];
2493 curlun->prevent_medium_removal = 0;
2494 curlun->sense_data = SS_NO_SENSE;
2495 curlun->unit_attention_data = SS_NO_SENSE;
2496 curlun->sense_data_info = 0;
2497 curlun->info_valid = 0;
2499 common->state = FSG_STATE_IDLE;
2501 spin_unlock_irq(&common->lock);
2503 /* Carry out any extra actions required for the exception */
2504 switch (old_state) {
2505 case FSG_STATE_ABORT_BULK_OUT:
2506 send_status(common);
2507 spin_lock_irq(&common->lock);
2508 if (common->state == FSG_STATE_STATUS_PHASE)
2509 common->state = FSG_STATE_IDLE;
2510 spin_unlock_irq(&common->lock);
2513 case FSG_STATE_RESET:
2514 /* In case we were forced against our will to halt a
2515 * bulk endpoint, clear the halt now. (The SuperH UDC
2516 * requires this.) */
2517 if (!fsg_is_set(common))
2519 if (test_and_clear_bit(IGNORE_BULK_OUT,
2520 &common->fsg->atomic_bitflags))
2521 usb_ep_clear_halt(common->fsg->bulk_in);
2523 if (common->ep0_req_tag == exception_req_tag)
2524 ep0_queue(common); /* Complete the status stage */
2526 /* Technically this should go here, but it would only be
2527 * a waste of time. Ditto for the INTERFACE_CHANGE and
2528 * CONFIG_CHANGE cases. */
2529 /* for (i = 0; i < common->nluns; ++i) */
2530 /* common->luns[i].unit_attention_data = */
2531 /* SS_RESET_OCCURRED; */
2534 case FSG_STATE_CONFIG_CHANGE:
2535 rc = do_set_config(common, new_config);
2538 case FSG_STATE_EXIT:
2539 case FSG_STATE_TERMINATED:
2540 do_set_config(common, 0); /* Free resources */
2541 spin_lock_irq(&common->lock);
2542 common->state = FSG_STATE_TERMINATED; /* Stop the thread */
2543 spin_unlock_irq(&common->lock);
2546 case FSG_STATE_INTERFACE_CHANGE:
2547 case FSG_STATE_DISCONNECT:
2548 case FSG_STATE_COMMAND_PHASE:
2549 case FSG_STATE_DATA_PHASE:
2550 case FSG_STATE_STATUS_PHASE:
2551 case FSG_STATE_IDLE:
2557 /*-------------------------------------------------------------------------*/
2559 static int fsg_main_thread(void *common_)
2561 struct fsg_common *common = common_;
2563 /* Allow the thread to be killed by a signal, but set the signal mask
2564 * to block everything but INT, TERM, KILL, and USR1. */
2565 allow_signal(SIGINT);
2566 allow_signal(SIGTERM);
2567 allow_signal(SIGKILL);
2568 allow_signal(SIGUSR1);
2570 /* Allow the thread to be frozen */
2573 /* Arrange for userspace references to be interpreted as kernel
2574 * pointers. That way we can pass a kernel pointer to a routine
2575 * that expects a __user pointer and it will work okay. */
2579 while (common->state != FSG_STATE_TERMINATED) {
2580 if (exception_in_progress(common) || signal_pending(current)) {
2581 handle_exception(common);
2585 if (!common->running) {
2586 sleep_thread(common);
2590 if (get_next_command(common))
2593 spin_lock_irq(&common->lock);
2594 if (!exception_in_progress(common))
2595 common->state = FSG_STATE_DATA_PHASE;
2596 spin_unlock_irq(&common->lock);
2598 if (do_scsi_command(common) || finish_reply(common))
2601 spin_lock_irq(&common->lock);
2602 if (!exception_in_progress(common))
2603 common->state = FSG_STATE_STATUS_PHASE;
2604 spin_unlock_irq(&common->lock);
2606 if (send_status(common))
2609 spin_lock_irq(&common->lock);
2610 if (!exception_in_progress(common))
2611 common->state = FSG_STATE_IDLE;
2612 spin_unlock_irq(&common->lock);
2615 spin_lock_irq(&common->lock);
2616 common->thread_task = NULL;
2617 spin_unlock_irq(&common->lock);
2619 if (!common->thread_exits || common->thread_exits(common) < 0) {
2620 struct fsg_lun *curlun = common->luns;
2621 unsigned i = common->nluns;
2623 down_write(&common->filesem);
2624 for (; i--; ++curlun) {
2625 if (!fsg_lun_is_open(curlun))
2628 fsg_lun_close(curlun);
2629 curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
2631 up_write(&common->filesem);
2634 /* Let the unbind and cleanup routines know the thread has exited */
2635 complete_and_exit(&common->thread_notifier, 0);
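/*
 * To recap, each pass through the loop above is one Bulk-Only Transport
 * cycle: get_next_command() waits for a CBW (command phase),
 * do_scsi_command() and finish_reply() move the data (data phase), and
 * send_status() returns a CSW (status phase), with exceptions handled
 * in between whenever one is raised.
 */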
2639 /*************************** DEVICE ATTRIBUTES ***************************/
2641 /* Write permission is checked per LUN in store_*() functions. */
2642 static DEVICE_ATTR(ro, 0644, fsg_show_ro, fsg_store_ro);
2643 static DEVICE_ATTR(file, 0644, fsg_show_file, fsg_store_file);
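/*
 * These attributes show up as per-LUN "ro" and "file" entries in sysfs,
 * letting userspace inspect or change the read-only flag and the backing
 * file at run time, subject to the checks in the fsg_store_*() helpers.
 */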
2646 /****************************** FSG COMMON ******************************/
2648 static void fsg_common_release(struct kref *ref);
2650 static void fsg_lun_release(struct device *dev)
2652 /* Nothing needs to be done */
2655 static inline void fsg_common_get(struct fsg_common *common)
2657 kref_get(&common->ref);
2660 static inline void fsg_common_put(struct fsg_common *common)
2662 kref_put(&common->ref, fsg_common_release);
2666 static struct fsg_common *fsg_common_init(struct fsg_common *common,
2667 struct usb_composite_dev *cdev,
2668 struct fsg_config *cfg)
2670 struct usb_gadget *gadget = cdev->gadget;
2671 struct fsg_buffhd *bh;
2672 struct fsg_lun *curlun;
2673 struct fsg_lun_config *lcfg;
2677 /* Find out how many LUNs there should be */
2679 if (nluns < 1 || nluns > FSG_MAX_LUNS) {
2680 dev_err(&gadget->dev, "invalid number of LUNs: %u\n", nluns);
2681 return ERR_PTR(-EINVAL);
2686 common = kzalloc(sizeof *common, GFP_KERNEL);
2688 return ERR_PTR(-ENOMEM);
2689 common->free_storage_on_release = 1;
2691 memset(common, 0, sizeof *common);
2692 common->free_storage_on_release = 0;
2695 common->private_data = cfg->private_data;
2697 common->gadget = gadget;
2698 common->ep0 = gadget->ep0;
2699 common->ep0req = cdev->req;
2701 /* Maybe allocate device-global string IDs, and patch descriptors */
2702 if (fsg_strings[FSG_STRING_INTERFACE].id == 0) {
2703 rc = usb_string_id(cdev);
2708 fsg_strings[FSG_STRING_INTERFACE].id = rc;
2709 fsg_intf_desc.iInterface = rc;
2712 /* Create the LUNs, open their backing files, and register the
2713 * LUN devices in sysfs. */
2714 curlun = kzalloc(nluns * sizeof *curlun, GFP_KERNEL);
2717 return ERR_PTR(-ENOMEM);
2719 common->luns = curlun;
2721 init_rwsem(&common->filesem);
2723 for (i = 0, lcfg = cfg->luns; i < nluns; ++i, ++curlun, ++lcfg) {
2724 curlun->cdrom = !!lcfg->cdrom;
2725 curlun->ro = lcfg->cdrom || lcfg->ro;
2726 curlun->removable = lcfg->removable;
2727 curlun->dev.release = fsg_lun_release;
2728 curlun->dev.parent = &gadget->dev;
2729 /* curlun->dev.driver = &fsg_driver.driver; XXX */
2730 dev_set_drvdata(&curlun->dev, &common->filesem);
2731 dev_set_name(&curlun->dev,
2732 cfg->lun_name_format
2733 ? cfg->lun_name_format
2737 rc = device_register(&curlun->dev);
2739 INFO(common, "failed to register LUN%d: %d\n", i, rc);
2744 rc = device_create_file(&curlun->dev, &dev_attr_ro);
2747 rc = device_create_file(&curlun->dev, &dev_attr_file);
2751 if (lcfg->filename) {
2752 rc = fsg_lun_open(curlun, lcfg->filename);
2755 } else if (!curlun->removable) {
2756 ERROR(common, "no file given for LUN%d\n", i);
2761 common->nluns = nluns;
2764 /* Data buffers cyclic list */
2765 /* Buffers in buffhds are static -- no need for additional
2766 * allocation. */
2767 bh = common->buffhds;
2768 i = FSG_NUM_BUFFERS - 1;
2769 do {
2770 bh->next = bh + 1;
2771 } while (++bh, --i);
2772 bh->next = common->buffhds;
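	/*
	 * Linking the buffer heads into a ring lets the worker thread queue
	 * the next transfer while the previous one is still on the wire;
	 * next_buffhd_to_fill and next_buffhd_to_drain simply chase each
	 * other around this list.
	 */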
2775 /* Prepare inquiryString */
2776 if (cfg->release != 0xffff) {
2779 i = usb_gadget_controller_number(gadget);
2783 WARNING(common, "controller '%s' not recognized\n",
2788 #define OR(x, y) ((x) ? (x) : (y))
2789 snprintf(common->inquiry_string, sizeof common->inquiry_string,
2791 OR(cfg->vendor_name, "Linux "),
2792 /* Assume product name dependent on the first LUN */
2793 OR(cfg->product_name, common->luns->cdrom
2794 ? "File-CD Gadget "
2795 : "File-Stor Gadget"),
2799 /* Some peripheral controllers are known not to be able to
2800 * halt bulk endpoints correctly. If one of them is present,
2801 * disable stalls. */
2803 common->can_stall = cfg->can_stall &&
2804 !(gadget_is_at91(common->gadget));
2807 spin_lock_init(&common->lock);
2808 kref_init(&common->ref);
2811 /* Tell the thread to start working */
2812 common->thread_exits = cfg->thread_exits;
2813 common->thread_task =
2814 kthread_create(fsg_main_thread, common,
2815 OR(cfg->thread_name, "file-storage"));
2816 if (IS_ERR(common->thread_task)) {
2817 rc = PTR_ERR(common->thread_task);
2820 init_completion(&common->thread_notifier);
2825 INFO(common, FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n");
2826 INFO(common, "Number of LUNs=%d\n", common->nluns);
2828 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
2829 for (i = 0, nluns = common->nluns, curlun = common->luns;
2832 char *p = "(no medium)";
2833 if (fsg_lun_is_open(curlun)) {
2836 p = d_path(&curlun->filp->f_path,
2842 LINFO(curlun, "LUN: %s%s%sfile: %s\n",
2843 curlun->removable ? "removable " : "",
2844 curlun->ro ? "read only " : "",
2845 curlun->cdrom ? "CD-ROM " : "",
2850 DBG(common, "I/O thread pid: %d\n", task_pid_nr(common->thread_task));
2852 wake_up_process(common->thread_task);
2858 common->nluns = i + 1;
2860 common->state = FSG_STATE_TERMINATED; /* The thread is dead */
2861 /* Call fsg_common_release() directly; the ref might not be
2862 * initialised yet. */
2863 fsg_common_release(&common->ref);
2868 static void fsg_common_release(struct kref *ref)
2870 struct fsg_common *common =
2871 container_of(ref, struct fsg_common, ref);
2872 unsigned i = common->nluns;
2873 struct fsg_lun *lun = common->luns;
2875 /* If the thread isn't already dead, tell it to exit now */
2876 if (common->state != FSG_STATE_TERMINATED) {
2877 raise_exception(common, FSG_STATE_EXIT);
2878 wait_for_completion(&common->thread_notifier);
2880 /* The cleanup routine waits for this completion also */
2881 complete(&common->thread_notifier);
2884 /* Beware of the tempting for -> do-while optimization: during error
2885 * recovery nluns may be zero. */
2887 for (; i; --i, ++lun) {
2888 device_remove_file(&lun->dev, &dev_attr_ro);
2889 device_remove_file(&lun->dev, &dev_attr_file);
2891 device_unregister(&lun->dev);
2894 kfree(common->luns);
2895 if (common->free_storage_on_release)
2900 /*-------------------------------------------------------------------------*/
2903 static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
2905 struct fsg_dev *fsg = fsg_from_func(f);
2907 DBG(fsg, "unbind\n");
2908 fsg_common_put(fsg->common);
2913 static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
2915 struct fsg_dev *fsg = fsg_from_func(f);
2916 struct usb_gadget *gadget = c->cdev->gadget;
2921 fsg->gadget = gadget;
2924 i = usb_interface_id(c, f);
2927 fsg_intf_desc.bInterfaceNumber = i;
2928 fsg->interface_number = i;
2930 /* Find all the endpoints we will use */
2931 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
2934 ep->driver_data = fsg->common; /* claim the endpoint */
2937 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
2940 ep->driver_data = fsg->common; /* claim the endpoint */
2943 if (gadget_is_dualspeed(gadget)) {
2944 /* Assume endpoint addresses are the same for both speeds */
2945 fsg_hs_bulk_in_desc.bEndpointAddress =
2946 fsg_fs_bulk_in_desc.bEndpointAddress;
2947 fsg_hs_bulk_out_desc.bEndpointAddress =
2948 fsg_fs_bulk_out_desc.bEndpointAddress;
2949 f->hs_descriptors = fsg_hs_function;
2955 ERROR(fsg, "unable to autoconfigure all endpoints\n");
2962 /****************************** ADD FUNCTION ******************************/
2964 static struct usb_gadget_strings *fsg_strings_array[] = {
2969 static int fsg_add(struct usb_composite_dev *cdev,
2970 struct usb_configuration *c,
2971 struct fsg_common *common)
2973 struct fsg_dev *fsg;
2976 fsg = kzalloc(sizeof *fsg, GFP_KERNEL);
2980 fsg->function.name = FSG_DRIVER_DESC;
2981 fsg->function.strings = fsg_strings_array;
2982 fsg->function.descriptors = fsg_fs_function;
2983 fsg->function.bind = fsg_bind;
2984 fsg->function.unbind = fsg_unbind;
2985 fsg->function.setup = fsg_setup;
2986 fsg->function.set_alt = fsg_set_alt;
2987 fsg->function.disable = fsg_disable;
2989 fsg->common = common;
2990 /* Our caller holds a reference to the common structure, so we
2991 * don't have to worry about it being freed until we return
2992 * from this function. Rather than taking a reference now and
2993 * dropping it in error recovery, we take it only after the
2994 * call to usb_add_function() has succeeded. */
2996 rc = usb_add_function(c, &fsg->function);
2998 if (likely(rc == 0))
2999 fsg_common_get(fsg->common);
3008 /************************* Module parameters *************************/
3011 struct fsg_module_parameters {
3012 char *file[FSG_MAX_LUNS];
3013 int ro[FSG_MAX_LUNS];
3014 int removable[FSG_MAX_LUNS];
3015 int cdrom[FSG_MAX_LUNS];
3017 unsigned int file_count, ro_count, removable_count, cdrom_count;
3018 unsigned int luns; /* nluns */
3019 int stall; /* can_stall */
3023 #define _FSG_MODULE_PARAM_ARRAY(prefix, params, name, type, desc) \
3024 module_param_array_named(prefix ## name, params.name, type, \
3025 &prefix ## params.name ## _count, \
3027 MODULE_PARM_DESC(prefix ## name, desc)
3029 #define _FSG_MODULE_PARAM(prefix, params, name, type, desc) \
3030 module_param_named(prefix ## name, params.name, type, \
3032 MODULE_PARM_DESC(prefix ## name, desc)
3034 #define FSG_MODULE_PARAMETERS(prefix, params) \
3035 _FSG_MODULE_PARAM_ARRAY(prefix, params, file, charp, \
3036 "names of backing files or devices"); \
3037 _FSG_MODULE_PARAM_ARRAY(prefix, params, ro, bool, \
3038 "true to force read-only"); \
3039 _FSG_MODULE_PARAM_ARRAY(prefix, params, removable, bool, \
3040 "true to simulate removable media"); \
3041 _FSG_MODULE_PARAM_ARRAY(prefix, params, cdrom, bool, \
3042 "true to simulate CD-ROM instead of disk"); \
3043 _FSG_MODULE_PARAM(prefix, params, luns, uint, \
3044 "number of LUNs"); \
3045 _FSG_MODULE_PARAM(prefix, params, stall, bool, \
3046 "false to prevent bulk stalls")
3050 fsg_config_from_params(struct fsg_config *cfg,
3051 const struct fsg_module_parameters *params)
3053 struct fsg_lun_config *lun;
3056 /* Configure LUNs */
3057 cfg->nluns =
3058 min(params->luns ?: (params->file_count ?: 1u),
3059 (unsigned)FSG_MAX_LUNS);
3060 for (i = 0, lun = cfg->luns; i < cfg->nluns; ++i, ++lun) {
3061 lun->ro = !!params->ro[i];
3062 lun->cdrom = !!params->cdrom[i];
3063 lun->removable = /* Removable by default */
3064 params->removable_count <= i || params->removable[i];
3065 lun->filename =
3066 params->file_count > i && params->file[i][0]
3067 ? params->file[i]
3068 : NULL;
3071 /* Let MSF use defaults */
3072 cfg->lun_name_format = NULL;
3073 cfg->thread_name = NULL;
3074 cfg->vendor_name = NULL;
3075 cfg->product_name = NULL;
3076 cfg->release = 0xffff;
3078 cfg->thread_exits = NULL;
3079 cfg->private_data = NULL;
3082 cfg->can_stall = params->stall;
3085 static inline struct fsg_common *
3086 fsg_common_from_params(struct fsg_common *common,
3087 struct usb_composite_dev *cdev,
3088 const struct fsg_module_parameters *params)
3089 __attribute__((unused));
3090 static inline struct fsg_common *
3091 fsg_common_from_params(struct fsg_common *common,
3092 struct usb_composite_dev *cdev,
3093 const struct fsg_module_parameters *params)
3095 struct fsg_config cfg;
3096 fsg_config_from_params(&cfg, params);
3097 return fsg_common_init(common, cdev, &cfg);
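/*
 * And a matching sketch of a composite gadget's configuration bind routine
 * (hypothetical caller, assuming the mod_data block from the previous
 * example and a struct usb_configuration supplied by the composite
 * framework): build an fsg_common from the module parameters, add the
 * function to the configuration, then drop the local reference since
 * fsg_add() takes its own on success.
 */
#if 0
static int msf_do_config(struct usb_configuration *c)
{
	struct fsg_common *common;
	int ret;

	common = fsg_common_from_params(NULL, c->cdev, &mod_data);
	if (IS_ERR(common))
		return PTR_ERR(common);

	ret = fsg_add(c->cdev, c, common);
	fsg_common_put(common);
	return ret;
}
#endif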