1 // SPDX-License-Identifier: GPL-2.0
2 /* Target based USB-Gadget
4 * UAS protocol handling, target callbacks, configfs handling,
5 * BBB (USB Mass Storage Class Bulk-Only Transport) protocol handling.
7 * Author: Sebastian Andrzej Siewior <bigeasy at linutronix dot de>
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/types.h>
12 #include <linux/string.h>
13 #include <linux/configfs.h>
14 #include <linux/ctype.h>
15 #include <linux/delay.h>
16 #include <linux/usb/ch9.h>
17 #include <linux/usb/composite.h>
18 #include <linux/usb/gadget.h>
19 #include <linux/usb/storage.h>
20 #include <scsi/scsi_tcq.h>
21 #include <target/target_core_base.h>
22 #include <target/target_core_fabric.h>
23 #include <linux/unaligned.h>
29 #define TPG_INSTANCES 1
32 struct usb_function_instance *func_inst;
36 static struct tpg_instance tpg_instances[TPG_INSTANCES];
38 static DEFINE_MUTEX(tpg_instances_lock);
40 static inline struct f_uas *to_f_uas(struct usb_function *f)
42 return container_of(f, struct f_uas, function);
45 /* Start bot.c code */
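/*
 * Re-arm the single BOT command request on the bulk-OUT endpoint so the next
 * CBW can be received. USBG_BOT_CMD_PEND guards against queueing it twice.
 */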
47 static int bot_enqueue_cmd_cbw(struct f_uas *fu)
51 if (fu->flags & USBG_BOT_CMD_PEND)
54 ret = usb_ep_queue(fu->ep_out, fu->cmd[0].req, GFP_ATOMIC);
56 fu->flags |= USBG_BOT_CMD_PEND;
60 static void bot_status_complete(struct usb_ep *ep, struct usb_request *req)
62 struct usbg_cmd *cmd = req->context;
63 struct f_uas *fu = cmd->fu;
65 transport_generic_free_cmd(&cmd->se_cmd, 0);
66 if (req->status == -ESHUTDOWN)
70 pr_err("ERR %s(%d)\n", __func__, __LINE__);
72 /* CSW completed, wait for next CBW */
73 bot_enqueue_cmd_cbw(fu);
76 static void bot_enqueue_sense_code(struct f_uas *fu, struct usbg_cmd *cmd)
78 struct bulk_cs_wrap *csw = &fu->bot_status.csw;
80 unsigned int csw_stat;
82 csw_stat = cmd->csw_code;
83 csw->Tag = cmd->bot_tag;
84 csw->Status = csw_stat;
85 fu->bot_status.req->context = cmd;
86 ret = usb_ep_queue(fu->ep_in, fu->bot_status.req, GFP_ATOMIC);
88 pr_err("%s(%d) ERR: %d\n", __func__, __LINE__, ret);
91 static void bot_err_compl(struct usb_ep *ep, struct usb_request *req)
93 struct usbg_cmd *cmd = req->context;
94 struct f_uas *fu = cmd->fu;
97 pr_err("ERR %s(%d)\n", __func__, __LINE__);
100 if (cmd->data_len > ep->maxpacket) {
101 req->length = ep->maxpacket;
102 cmd->data_len -= ep->maxpacket;
104 req->length = cmd->data_len;
108 usb_ep_queue(ep, req, GFP_ATOMIC);
111 bot_enqueue_sense_code(fu, cmd);
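/*
 * The host expects a data phase we cannot satisfy: fill or drain the pipe in
 * maxpacket-sized chunks (bot_err_compl re-queues the request until
 * cmd->data_len is consumed) and then send the CSW with the residue set.
 */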
114 static void bot_send_bad_status(struct usbg_cmd *cmd)
116 struct f_uas *fu = cmd->fu;
117 struct bulk_cs_wrap *csw = &fu->bot_status.csw;
118 struct usb_request *req;
121 csw->Residue = cpu_to_le32(cmd->data_len);
126 req = fu->bot_req_in;
129 req = fu->bot_req_out;
132 if (cmd->data_len > fu->ep_in->maxpacket) {
133 req->length = ep->maxpacket;
134 cmd->data_len -= ep->maxpacket;
136 req->length = cmd->data_len;
139 req->complete = bot_err_compl;
141 req->buf = fu->cmd[0].buf;
142 usb_ep_queue(ep, req, GFP_KERNEL);
144 bot_enqueue_sense_code(fu, cmd);
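/*
 * Send the CSW for a completed command. A clean US_BULK_STAT_OK status is
 * queued directly; if the host expected a data phase that never happened, or
 * the SCSI status is bad, the CSW is routed through bot_send_bad_status() so
 * the residue is reported.
 */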
148 static int bot_send_status(struct usbg_cmd *cmd, bool moved_data)
150 struct f_uas *fu = cmd->fu;
151 struct bulk_cs_wrap *csw = &fu->bot_status.csw;
154 if (cmd->se_cmd.scsi_status == SAM_STAT_GOOD) {
155 if (!moved_data && cmd->data_len) {
157 * The host wants to move data, we don't. Fill / empty
158 * the pipe and then send the CSW with the residue set.
160 cmd->csw_code = US_BULK_STAT_OK;
161 bot_send_bad_status(cmd);
165 csw->Tag = cmd->bot_tag;
166 csw->Residue = cpu_to_le32(0);
167 csw->Status = US_BULK_STAT_OK;
168 fu->bot_status.req->context = cmd;
170 ret = usb_ep_queue(fu->ep_in, fu->bot_status.req, GFP_KERNEL);
172 pr_err("%s(%d) ERR: %d\n", __func__, __LINE__, ret);
174 cmd->csw_code = US_BULK_STAT_FAIL;
175 bot_send_bad_status(cmd);
181 * Called after a command with no data transfer, or after the write (to
182 * device) operation has completed.
184 static int bot_send_status_response(struct usbg_cmd *cmd)
186 bool moved_data = false;
190 return bot_send_status(cmd, moved_data);
193 /* Read request completed, now we have to send the CSW */
194 static void bot_read_compl(struct usb_ep *ep, struct usb_request *req)
196 struct usbg_cmd *cmd = req->context;
199 pr_err("ERR %s(%d)\n", __func__, __LINE__);
201 if (req->status == -ESHUTDOWN) {
202 transport_generic_free_cmd(&cmd->se_cmd, 0);
206 bot_send_status(cmd, true);
209 static int bot_send_read_response(struct usbg_cmd *cmd)
211 struct f_uas *fu = cmd->fu;
212 struct se_cmd *se_cmd = &cmd->se_cmd;
213 struct usb_gadget *gadget = fuas_to_gadget(fu);
216 if (!cmd->data_len) {
217 cmd->csw_code = US_BULK_STAT_PHASE;
218 bot_send_bad_status(cmd);
222 if (!gadget->sg_supported) {
223 cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
227 sg_copy_to_buffer(se_cmd->t_data_sg,
228 se_cmd->t_data_nents,
230 se_cmd->data_length);
232 fu->bot_req_in->buf = cmd->data_buf;
234 fu->bot_req_in->buf = NULL;
235 fu->bot_req_in->num_sgs = se_cmd->t_data_nents;
236 fu->bot_req_in->sg = se_cmd->t_data_sg;
239 fu->bot_req_in->complete = bot_read_compl;
240 fu->bot_req_in->length = se_cmd->data_length;
241 fu->bot_req_in->context = cmd;
242 ret = usb_ep_queue(fu->ep_in, fu->bot_req_in, GFP_ATOMIC);
244 pr_err("%s(%d)\n", __func__, __LINE__);
248 static void usbg_data_write_cmpl(struct usb_ep *, struct usb_request *);
249 static int usbg_prepare_w_request(struct usbg_cmd *, struct usb_request *);
251 static int bot_send_write_request(struct usbg_cmd *cmd)
253 struct f_uas *fu = cmd->fu;
258 if (!cmd->data_len) {
259 cmd->csw_code = US_BULK_STAT_PHASE;
263 ret = usbg_prepare_w_request(cmd, fu->bot_req_out);
266 ret = usb_ep_queue(fu->ep_out, fu->bot_req_out, GFP_KERNEL);
268 pr_err("%s(%d)\n", __func__, __LINE__);
274 static int bot_submit_command(struct f_uas *, void *, unsigned int);
276 static void bot_cmd_complete(struct usb_ep *ep, struct usb_request *req)
278 struct f_uas *fu = req->context;
281 if (req->status == -ESHUTDOWN)
284 fu->flags &= ~USBG_BOT_CMD_PEND;
286 if (req->status < 0) {
287 struct usb_gadget *gadget = fuas_to_gadget(fu);
289 dev_err(&gadget->dev, "BOT command req err (%d)\n", req->status);
290 bot_enqueue_cmd_cbw(fu);
294 ret = bot_submit_command(fu, req->buf, req->actual);
296 pr_err("%s(%d): %d\n", __func__, __LINE__, ret);
297 if (!(fu->flags & USBG_BOT_WEDGED))
298 usb_ep_set_wedge(fu->ep_in);
300 fu->flags |= USBG_BOT_WEDGED;
301 bot_enqueue_cmd_cbw(fu);
302 } else if (fu->flags & USBG_BOT_WEDGED) {
303 fu->flags &= ~USBG_BOT_WEDGED;
304 usb_ep_clear_halt(fu->ep_in);
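/*
 * Allocate the BOT requests: one data-in and one data-out request, the CBW
 * command request (with a maxpacket-sized bounce buffer) and the CSW status
 * request, then queue the first CBW. On failure, everything allocated so far
 * is freed again.
 */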
308 static int bot_prepare_reqs(struct f_uas *fu)
312 fu->bot_req_in = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
316 fu->bot_req_out = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
317 if (!fu->bot_req_out)
320 fu->cmd[0].req = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
324 fu->bot_status.req = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
325 if (!fu->bot_status.req)
328 fu->bot_status.req->buf = &fu->bot_status.csw;
329 fu->bot_status.req->length = US_BULK_CS_WRAP_LEN;
330 fu->bot_status.req->complete = bot_status_complete;
331 fu->bot_status.csw.Signature = cpu_to_le32(US_BULK_CS_SIGN);
333 fu->cmd[0].buf = kmalloc(fu->ep_out->maxpacket, GFP_KERNEL);
337 fu->cmd[0].req->complete = bot_cmd_complete;
338 fu->cmd[0].req->buf = fu->cmd[0].buf;
339 fu->cmd[0].req->length = fu->ep_out->maxpacket;
340 fu->cmd[0].req->context = fu;
342 ret = bot_enqueue_cmd_cbw(fu);
347 kfree(fu->cmd[0].buf);
348 fu->cmd[0].buf = NULL;
350 usb_ep_free_request(fu->ep_in, fu->bot_status.req);
352 usb_ep_free_request(fu->ep_out, fu->cmd[0].req);
353 fu->cmd[0].req = NULL;
355 usb_ep_free_request(fu->ep_out, fu->bot_req_out);
356 fu->bot_req_out = NULL;
358 usb_ep_free_request(fu->ep_in, fu->bot_req_in);
359 fu->bot_req_in = NULL;
361 pr_err("BOT: endpoint setup failed\n");
365 static void bot_cleanup_old_alt(struct f_uas *fu)
367 if (!(fu->flags & USBG_ENABLED))
370 usb_ep_disable(fu->ep_in);
371 usb_ep_disable(fu->ep_out);
376 usb_ep_free_request(fu->ep_in, fu->bot_req_in);
377 usb_ep_free_request(fu->ep_out, fu->bot_req_out);
378 usb_ep_free_request(fu->ep_out, fu->cmd[0].req);
379 usb_ep_free_request(fu->ep_in, fu->bot_status.req);
381 kfree(fu->cmd[0].buf);
383 fu->bot_req_in = NULL;
384 fu->bot_req_out = NULL;
385 fu->cmd[0].req = NULL;
386 fu->bot_status.req = NULL;
387 fu->cmd[0].buf = NULL;
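/*
 * Switch to the BOT alternate setting: configure and enable the bulk
 * endpoints for the current speed, allocate the BOT requests and wait for
 * the first CBW.
 */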
390 static void bot_set_alt(struct f_uas *fu)
392 struct usb_function *f = &fu->function;
393 struct usb_gadget *gadget = f->config->cdev->gadget;
396 fu->flags = USBG_IS_BOT;
398 config_ep_by_speed_and_alt(gadget, f, fu->ep_in, USB_G_ALT_INT_BBB);
399 ret = usb_ep_enable(fu->ep_in);
403 config_ep_by_speed_and_alt(gadget, f, fu->ep_out, USB_G_ALT_INT_BBB);
404 ret = usb_ep_enable(fu->ep_out);
408 ret = bot_prepare_reqs(fu);
411 fu->flags |= USBG_ENABLED;
412 pr_info("Using the BOT protocol\n");
415 usb_ep_disable(fu->ep_out);
417 usb_ep_disable(fu->ep_in);
419 fu->flags = USBG_IS_BOT;
422 static int usbg_bot_setup(struct usb_function *f,
423 const struct usb_ctrlrequest *ctrl)
425 struct f_uas *fu = to_f_uas(f);
426 struct usb_composite_dev *cdev = f->config->cdev;
427 u16 w_value = le16_to_cpu(ctrl->wValue);
428 u16 w_length = le16_to_cpu(ctrl->wLength);
432 switch (ctrl->bRequest) {
433 case US_BULK_GET_MAX_LUN:
434 if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_CLASS |
435 USB_RECIP_INTERFACE))
442 luns = atomic_read(&fu->tpg->tpg_port_count);
444 pr_err("No LUNs configured?\n");
448 if (luns > US_BULK_MAX_LUN_LIMIT) {
449 pr_info_once("Limiting the number of luns to 16\n");
450 luns = US_BULK_MAX_LUN_LIMIT;
452 ret_lun = cdev->req->buf;
454 cdev->req->length = 1;
455 return usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
457 case US_BULK_RESET_REQUEST:
458 /* XXX maybe we should remove previous requests for IN + OUT */
459 if (fu->flags & USBG_BOT_WEDGED) {
460 fu->flags &= ~USBG_BOT_WEDGED;
461 usb_ep_clear_halt(fu->ep_in);
464 bot_enqueue_cmd_cbw(fu);
470 /* Start uas.c code */
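/*
 * Translate between the target core's TMR response codes and the response
 * codes carried in a UAS response IU, and map UAS task management functions
 * onto their TCM equivalents.
 */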
472 static int tcm_to_uasp_response(enum tcm_tmrsp_table code)
475 case TMR_FUNCTION_FAILED:
476 return RC_TMF_FAILED;
477 case TMR_FUNCTION_COMPLETE:
478 case TMR_TASK_DOES_NOT_EXIST:
479 return RC_TMF_COMPLETE;
480 case TMR_LUN_DOES_NOT_EXIST:
481 return RC_INCORRECT_LUN;
482 case TMR_FUNCTION_REJECTED:
483 case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
485 return RC_TMF_NOT_SUPPORTED;
489 static unsigned char uasp_to_tcm_func(int code)
493 return TMR_ABORT_TASK;
494 case TMF_ABORT_TASK_SET:
495 return TMR_ABORT_TASK_SET;
496 case TMF_CLEAR_TASK_SET:
497 return TMR_CLEAR_TASK_SET;
498 case TMF_LOGICAL_UNIT_RESET:
499 return TMR_LUN_RESET;
501 return TMR_CLEAR_ACA;
502 case TMF_I_T_NEXUS_RESET:
504 case TMF_QUERY_TASK_SET:
505 case TMF_QUERY_ASYNC_EVENT:
511 static void uasp_cleanup_one_stream(struct f_uas *fu, struct uas_stream *stream)
513 /* We have either all three allocated or none */
517 usb_ep_free_request(fu->ep_in, stream->req_in);
518 usb_ep_free_request(fu->ep_out, stream->req_out);
519 usb_ep_free_request(fu->ep_status, stream->req_status);
521 stream->req_in = NULL;
522 stream->req_out = NULL;
523 stream->req_status = NULL;
526 static void uasp_free_cmdreq(struct f_uas *fu)
530 for (i = 0; i < USBG_NUM_CMDS; i++) {
531 usb_ep_free_request(fu->ep_cmd, fu->cmd[i].req);
532 kfree(fu->cmd[i].buf);
533 fu->cmd[i].req = NULL;
534 fu->cmd[i].buf = NULL;
538 static void uasp_cleanup_old_alt(struct f_uas *fu)
542 if (!(fu->flags & USBG_ENABLED))
545 usb_ep_disable(fu->ep_in);
546 usb_ep_disable(fu->ep_out);
547 usb_ep_disable(fu->ep_status);
548 usb_ep_disable(fu->ep_cmd);
550 for (i = 0; i < USBG_NUM_CMDS; i++)
551 uasp_cleanup_one_stream(fu, &fu->stream[i]);
552 uasp_free_cmdreq(fu);
555 static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req);
557 static int uasp_prepare_r_request(struct usbg_cmd *cmd)
559 struct se_cmd *se_cmd = &cmd->se_cmd;
560 struct f_uas *fu = cmd->fu;
561 struct usb_gadget *gadget = fuas_to_gadget(fu);
562 struct uas_stream *stream = &fu->stream[se_cmd->map_tag];
564 if (!gadget->sg_supported) {
565 cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
569 sg_copy_to_buffer(se_cmd->t_data_sg,
570 se_cmd->t_data_nents,
572 se_cmd->data_length);
574 stream->req_in->buf = cmd->data_buf;
576 stream->req_in->buf = NULL;
577 stream->req_in->num_sgs = se_cmd->t_data_nents;
578 stream->req_in->sg = se_cmd->t_data_sg;
581 stream->req_in->is_last = 1;
582 stream->req_in->stream_id = cmd->tag;
583 stream->req_in->complete = uasp_status_data_cmpl;
584 stream->req_in->length = se_cmd->data_length;
585 stream->req_in->context = cmd;
587 cmd->state = UASP_SEND_STATUS;
591 static void uasp_prepare_status(struct usbg_cmd *cmd)
593 struct se_cmd *se_cmd = &cmd->se_cmd;
594 struct sense_iu *iu = &cmd->sense_iu;
595 struct uas_stream *stream = &cmd->fu->stream[se_cmd->map_tag];
597 cmd->state = UASP_QUEUE_COMMAND;
598 iu->iu_id = IU_ID_STATUS;
599 iu->tag = cpu_to_be16(cmd->tag);
602 * iu->status_qual = cpu_to_be16(STATUS QUALIFIER SAM-4. Where R U?);
604 iu->len = cpu_to_be16(se_cmd->scsi_sense_length);
605 iu->status = se_cmd->scsi_status;
606 stream->req_status->is_last = 1;
607 stream->req_status->stream_id = cmd->tag;
608 stream->req_status->context = cmd;
609 stream->req_status->length = se_cmd->scsi_sense_length + 16;
610 stream->req_status->buf = iu;
611 stream->req_status->complete = uasp_status_data_cmpl;
614 static void uasp_prepare_response(struct usbg_cmd *cmd)
616 struct se_cmd *se_cmd = &cmd->se_cmd;
617 struct response_iu *rsp_iu = &cmd->response_iu;
618 struct uas_stream *stream = &cmd->fu->stream[se_cmd->map_tag];
620 cmd->state = UASP_QUEUE_COMMAND;
621 rsp_iu->iu_id = IU_ID_RESPONSE;
622 rsp_iu->tag = cpu_to_be16(cmd->tag);
624 if (cmd->tmr_rsp != RC_RESPONSE_UNKNOWN)
625 rsp_iu->response_code = cmd->tmr_rsp;
627 rsp_iu->response_code =
628 tcm_to_uasp_response(se_cmd->se_tmr_req->response);
631 * The UASP driver must support all the task management functions listed
632 * in Table 20 of UAS-r04. To remain compliant while indicating that the
633 * TMR did not go through, report RC_TMF_FAILED instead of
634 * RC_TMF_NOT_SUPPORTED and print a warning to the user.
636 switch (cmd->tmr_func) {
638 case TMF_ABORT_TASK_SET:
639 case TMF_CLEAR_TASK_SET:
640 case TMF_LOGICAL_UNIT_RESET:
642 case TMF_I_T_NEXUS_RESET:
644 case TMF_QUERY_TASK_SET:
645 case TMF_QUERY_ASYNC_EVENT:
646 if (rsp_iu->response_code == RC_TMF_NOT_SUPPORTED) {
647 struct usb_gadget *gadget = fuas_to_gadget(cmd->fu);
649 dev_warn(&gadget->dev, "TMF function %d not supported\n",
651 rsp_iu->response_code = RC_TMF_FAILED;
658 stream->req_status->is_last = 1;
659 stream->req_status->stream_id = cmd->tag;
660 stream->req_status->context = cmd;
661 stream->req_status->length = sizeof(struct response_iu);
662 stream->req_status->buf = rsp_iu;
663 stream->req_status->complete = uasp_status_data_cmpl;
666 static void usbg_release_cmd(struct se_cmd *se_cmd);
667 static int uasp_send_tm_response(struct usbg_cmd *cmd);
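/*
 * Completion handler driving the per-command state machine:
 * UASP_SEND_DATA queues the data-in transfer, UASP_RECEIVE_DATA queues the
 * data-out transfer, UASP_SEND_STATUS queues the sense IU, and
 * UASP_QUEUE_COMMAND tears the command down (or reports an overlapped tag)
 * and re-arms the command endpoint.
 */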
669 static void uasp_status_data_cmpl(struct usb_ep *ep, struct usb_request *req)
671 struct usbg_cmd *cmd = req->context;
672 struct f_uas *fu = cmd->fu;
673 struct uas_stream *stream = &fu->stream[cmd->se_cmd.map_tag];
676 if (req->status == -ESHUTDOWN)
679 switch (cmd->state) {
681 ret = uasp_prepare_r_request(cmd);
684 ret = usb_ep_queue(fu->ep_in, stream->req_in, GFP_ATOMIC);
686 pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
689 case UASP_RECEIVE_DATA:
690 ret = usbg_prepare_w_request(cmd, stream->req_out);
693 ret = usb_ep_queue(fu->ep_out, stream->req_out, GFP_ATOMIC);
695 pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
698 case UASP_SEND_STATUS:
699 uasp_prepare_status(cmd);
700 ret = usb_ep_queue(fu->ep_status, stream->req_status,
703 pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
706 case UASP_QUEUE_COMMAND:
708 * An overlapped command was detected and cancelled,
709 * so send the overlapped-attempt status.
711 if (cmd->tmr_rsp == RC_OVERLAPPED_TAG &&
712 req->status == -ECONNRESET) {
713 uasp_send_tm_response(cmd);
717 hash_del(&stream->node);
720 * If no command was submitted to the target core here, just free the
721 * bitmap index. This covers the cases where f_tcm handles the
722 * status response instead of the target core.
724 if (cmd->tmr_rsp != RC_OVERLAPPED_TAG &&
725 cmd->tmr_rsp != RC_RESPONSE_UNKNOWN) {
726 struct se_session *se_sess;
728 se_sess = fu->tpg->tpg_nexus->tvn_se_sess;
729 sbitmap_queue_clear(&se_sess->sess_tag_pool,
731 cmd->se_cmd.map_cpu);
733 transport_generic_free_cmd(&cmd->se_cmd, 0);
736 usb_ep_queue(fu->ep_cmd, cmd->req, GFP_ATOMIC);
737 complete(&stream->cmd_completion);
746 hash_del(&stream->node);
747 transport_generic_free_cmd(&cmd->se_cmd, 0);
750 static int uasp_send_status_response(struct usbg_cmd *cmd)
752 struct f_uas *fu = cmd->fu;
753 struct uas_stream *stream = &fu->stream[cmd->se_cmd.map_tag];
754 struct sense_iu *iu = &cmd->sense_iu;
756 iu->tag = cpu_to_be16(cmd->tag);
758 uasp_prepare_status(cmd);
759 return usb_ep_queue(fu->ep_status, stream->req_status, GFP_ATOMIC);
762 static int uasp_send_tm_response(struct usbg_cmd *cmd)
764 struct f_uas *fu = cmd->fu;
765 struct uas_stream *stream = &fu->stream[cmd->se_cmd.map_tag];
766 struct response_iu *iu = &cmd->response_iu;
768 iu->tag = cpu_to_be16(cmd->tag);
770 uasp_prepare_response(cmd);
771 return usb_ep_queue(fu->ep_status, stream->req_status, GFP_ATOMIC);
774 static int uasp_send_read_response(struct usbg_cmd *cmd)
776 struct f_uas *fu = cmd->fu;
777 struct uas_stream *stream = &fu->stream[cmd->se_cmd.map_tag];
778 struct sense_iu *iu = &cmd->sense_iu;
783 iu->tag = cpu_to_be16(cmd->tag);
784 if (fu->flags & USBG_USE_STREAMS) {
786 ret = uasp_prepare_r_request(cmd);
789 ret = usb_ep_queue(fu->ep_in, stream->req_in, GFP_ATOMIC);
791 pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
792 kfree(cmd->data_buf);
793 cmd->data_buf = NULL;
798 iu->iu_id = IU_ID_READ_READY;
799 iu->tag = cpu_to_be16(cmd->tag);
801 stream->req_status->complete = uasp_status_data_cmpl;
802 stream->req_status->context = cmd;
804 cmd->state = UASP_SEND_DATA;
805 stream->req_status->buf = iu;
806 stream->req_status->length = sizeof(struct iu);
808 ret = usb_ep_queue(fu->ep_status, stream->req_status,
811 pr_err("%s(%d) => %d\n", __func__, __LINE__, ret);
817 static int uasp_send_write_request(struct usbg_cmd *cmd)
819 struct f_uas *fu = cmd->fu;
820 struct se_cmd *se_cmd = &cmd->se_cmd;
821 struct uas_stream *stream = &fu->stream[se_cmd->map_tag];
822 struct sense_iu *iu = &cmd->sense_iu;
827 iu->tag = cpu_to_be16(cmd->tag);
829 if (fu->flags & USBG_USE_STREAMS) {
831 ret = usbg_prepare_w_request(cmd, stream->req_out);
834 ret = usb_ep_queue(fu->ep_out, stream->req_out, GFP_ATOMIC);
836 pr_err("%s(%d)\n", __func__, __LINE__);
840 iu->iu_id = IU_ID_WRITE_READY;
841 iu->tag = cpu_to_be16(cmd->tag);
843 stream->req_status->complete = uasp_status_data_cmpl;
844 stream->req_status->context = cmd;
846 cmd->state = UASP_RECEIVE_DATA;
847 stream->req_status->buf = iu;
848 stream->req_status->length = sizeof(struct iu);
850 ret = usb_ep_queue(fu->ep_status, stream->req_status,
853 pr_err("%s(%d)\n", __func__, __LINE__);
860 static int usbg_submit_command(struct f_uas *, struct usb_request *);
862 static void uasp_cmd_complete(struct usb_ep *ep, struct usb_request *req)
864 struct f_uas *fu = req->context;
866 if (req->status == -ESHUTDOWN)
869 if (req->status < 0) {
870 usb_ep_queue(fu->ep_cmd, req, GFP_ATOMIC);
874 usbg_submit_command(fu, req);
877 static int uasp_alloc_stream_res(struct f_uas *fu, struct uas_stream *stream)
879 init_completion(&stream->cmd_completion);
881 stream->req_in = usb_ep_alloc_request(fu->ep_in, GFP_KERNEL);
885 stream->req_out = usb_ep_alloc_request(fu->ep_out, GFP_KERNEL);
886 if (!stream->req_out)
889 stream->req_status = usb_ep_alloc_request(fu->ep_status, GFP_KERNEL);
890 if (!stream->req_status)
896 usb_ep_free_request(fu->ep_out, stream->req_out);
897 stream->req_out = NULL;
899 usb_ep_free_request(fu->ep_in, stream->req_in);
900 stream->req_in = NULL;
905 static int uasp_alloc_cmd(struct f_uas *fu, int i)
907 fu->cmd[i].req = usb_ep_alloc_request(fu->ep_cmd, GFP_KERNEL);
911 fu->cmd[i].buf = kmalloc(fu->ep_cmd->maxpacket, GFP_KERNEL);
915 fu->cmd[i].req->complete = uasp_cmd_complete;
916 fu->cmd[i].req->buf = fu->cmd[i].buf;
917 fu->cmd[i].req->length = fu->ep_cmd->maxpacket;
918 fu->cmd[i].req->context = fu;
922 usb_ep_free_request(fu->ep_cmd, fu->cmd[i].req);
927 static int uasp_prepare_reqs(struct f_uas *fu)
932 for (i = 0; i < USBG_NUM_CMDS; i++) {
933 ret = uasp_alloc_stream_res(fu, &fu->stream[i]);
938 for (i = 0; i < USBG_NUM_CMDS; i++) {
939 ret = uasp_alloc_cmd(fu, i);
941 goto err_free_stream;
943 ret = usb_ep_queue(fu->ep_cmd, fu->cmd[i].req, GFP_ATOMIC);
945 goto err_free_stream;
951 uasp_free_cmdreq(fu);
956 uasp_cleanup_one_stream(fu, &fu->stream[i - 1]);
960 pr_err("UASP: endpoint setup failed\n");
964 static void uasp_set_alt(struct f_uas *fu)
966 struct usb_function *f = &fu->function;
967 struct usb_gadget *gadget = f->config->cdev->gadget;
970 fu->flags = USBG_IS_UAS;
972 if (gadget->speed >= USB_SPEED_SUPER)
973 fu->flags |= USBG_USE_STREAMS;
975 config_ep_by_speed_and_alt(gadget, f, fu->ep_in, USB_G_ALT_INT_UAS);
976 ret = usb_ep_enable(fu->ep_in);
980 config_ep_by_speed_and_alt(gadget, f, fu->ep_out, USB_G_ALT_INT_UAS);
981 ret = usb_ep_enable(fu->ep_out);
985 config_ep_by_speed_and_alt(gadget, f, fu->ep_cmd, USB_G_ALT_INT_UAS);
986 ret = usb_ep_enable(fu->ep_cmd);
989 config_ep_by_speed_and_alt(gadget, f, fu->ep_status, USB_G_ALT_INT_UAS);
990 ret = usb_ep_enable(fu->ep_status);
994 ret = uasp_prepare_reqs(fu);
997 fu->flags |= USBG_ENABLED;
999 pr_info("Using the UAS protocol\n");
1002 usb_ep_disable(fu->ep_status);
1004 usb_ep_disable(fu->ep_cmd);
1006 usb_ep_disable(fu->ep_out);
1008 usb_ep_disable(fu->ep_in);
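/*
 * Derive the DMA data direction from the SCSI opcode so it can be passed to
 * the target core; opcodes with an unknown direction are logged.
 */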
1013 static int get_cmd_dir(const unsigned char *cdb)
1025 case SERVICE_ACTION_IN_16:
1026 case MAINTENANCE_IN:
1027 case PERSISTENT_RESERVE_IN:
1028 case SECURITY_PROTOCOL_IN:
1029 case ACCESS_CONTROL_IN:
1031 case READ_BLOCK_LIMITS:
1035 case READ_FORMAT_CAPACITIES:
1039 ret = DMA_FROM_DEVICE;
1047 case MODE_SELECT_10:
1049 case WRITE_VERIFY_12:
1050 case PERSISTENT_RESERVE_OUT:
1051 case MAINTENANCE_OUT:
1052 case SECURITY_PROTOCOL_OUT:
1053 case ACCESS_CONTROL_OUT:
1054 ret = DMA_TO_DEVICE;
1056 case ALLOW_MEDIUM_REMOVAL:
1057 case TEST_UNIT_READY:
1058 case SYNCHRONIZE_CACHE:
1065 case WRITE_FILEMARKS:
1069 #define CMD_DIR_MSG "target: Unknown data direction for SCSI Opcode 0x%02x\n"
1070 pr_warn(CMD_DIR_MSG, cdb[0]);
1077 static void usbg_data_write_cmpl(struct usb_ep *ep, struct usb_request *req)
1079 struct usbg_cmd *cmd = req->context;
1080 struct se_cmd *se_cmd = &cmd->se_cmd;
1082 cmd->state = UASP_QUEUE_COMMAND;
1084 if (req->status == -ESHUTDOWN) {
1085 struct uas_stream *stream = &cmd->fu->stream[se_cmd->map_tag];
1087 hash_del(&stream->node);
1088 target_put_sess_cmd(se_cmd);
1089 transport_generic_free_cmd(&cmd->se_cmd, 0);
1094 pr_err("%s() state %d transfer failed\n", __func__, cmd->state);
1098 if (req->num_sgs == 0) {
1099 sg_copy_from_buffer(se_cmd->t_data_sg,
1100 se_cmd->t_data_nents,
1102 se_cmd->data_length);
1105 cmd->flags |= USBG_CMD_PENDING_DATA_WRITE;
1106 queue_work(cmd->fu->tpg->workqueue, &cmd->work);
1110 target_put_sess_cmd(se_cmd);
1112 /* Command was aborted due to overlapped tag */
1113 if (cmd->state == UASP_QUEUE_COMMAND &&
1114 cmd->tmr_rsp == RC_OVERLAPPED_TAG) {
1115 uasp_send_tm_response(cmd);
1119 transport_send_check_condition_and_sense(se_cmd,
1120 TCM_CHECK_CONDITION_ABORT_CMD, 0);
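/*
 * Prepare a data-out (write to device) request. If the UDC cannot do
 * scatter-gather, the data is bounced through a linear buffer; otherwise the
 * se_cmd scatterlist is handed to the request directly.
 */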
1123 static int usbg_prepare_w_request(struct usbg_cmd *cmd, struct usb_request *req)
1125 struct se_cmd *se_cmd = &cmd->se_cmd;
1126 struct f_uas *fu = cmd->fu;
1127 struct usb_gadget *gadget = fuas_to_gadget(fu);
1129 if (!gadget->sg_supported) {
1130 cmd->data_buf = kmalloc(se_cmd->data_length, GFP_ATOMIC);
1134 req->buf = cmd->data_buf;
1137 req->num_sgs = se_cmd->t_data_nents;
1138 req->sg = se_cmd->t_data_sg;
1142 req->stream_id = cmd->tag;
1143 req->complete = usbg_data_write_cmpl;
1144 req->length = se_cmd->data_length;
1147 cmd->state = UASP_SEND_STATUS;
1151 static int usbg_send_status_response(struct se_cmd *se_cmd)
1153 struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
1155 struct f_uas *fu = cmd->fu;
1157 if (fu->flags & USBG_IS_BOT)
1158 return bot_send_status_response(cmd);
1160 return uasp_send_status_response(cmd);
1163 static int usbg_send_write_request(struct se_cmd *se_cmd)
1165 struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
1167 struct f_uas *fu = cmd->fu;
1169 if (fu->flags & USBG_IS_BOT)
1170 return bot_send_write_request(cmd);
1172 return uasp_send_write_request(cmd);
1175 static int usbg_send_read_response(struct se_cmd *se_cmd)
1177 struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
1179 struct f_uas *fu = cmd->fu;
1181 if (fu->flags & USBG_IS_BOT)
1182 return bot_send_read_response(cmd);
1184 return uasp_send_read_response(cmd);
1187 static void usbg_aborted_task(struct se_cmd *se_cmd);
1189 static void usbg_submit_tmr(struct usbg_cmd *cmd)
1191 struct se_session *se_sess;
1192 struct se_cmd *se_cmd;
1193 int flags = TARGET_SCF_ACK_KREF;
1195 se_cmd = &cmd->se_cmd;
1196 se_sess = cmd->fu->tpg->tpg_nexus->tvn_se_sess;
1198 target_submit_tmr(se_cmd, se_sess,
1199 cmd->response_iu.add_response_info,
1200 cmd->unpacked_lun, NULL, uasp_to_tcm_func(cmd->tmr_func),
1201 GFP_ATOMIC, cmd->tag, flags);
1204 static void usbg_submit_cmd(struct usbg_cmd *cmd)
1206 struct se_cmd *se_cmd;
1207 struct tcm_usbg_nexus *tv_nexus;
1208 struct usbg_tpg *tpg;
1209 int dir, flags = (TARGET_SCF_UNKNOWN_SIZE | TARGET_SCF_ACK_KREF);
1212 * Note: each command runs in its own work item, and each stage of the
1213 * command is processed sequentially. Should this no longer be the case,
1214 * locking is needed.
1216 if (cmd->flags & USBG_CMD_PENDING_DATA_WRITE) {
1217 target_execute_cmd(&cmd->se_cmd);
1218 cmd->flags &= ~USBG_CMD_PENDING_DATA_WRITE;
1222 se_cmd = &cmd->se_cmd;
1224 tv_nexus = tpg->tpg_nexus;
1225 dir = get_cmd_dir(cmd->cmd_buf);
1229 target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess, cmd->cmd_buf,
1230 cmd->sense_iu.sense, cmd->unpacked_lun, 0,
1231 cmd->prio_attr, dir, flags);
1236 __target_init_cmd(se_cmd,
1237 tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
1238 tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
1239 cmd->prio_attr, cmd->sense_iu.sense,
1240 cmd->unpacked_lun, NULL);
1241 transport_send_check_condition_and_sense(se_cmd,
1242 TCM_UNSUPPORTED_SCSI_OPCODE, 0);
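/*
 * Workqueue handler for a received IU. Normally the command or TMR is
 * submitted to the target core; if f_tcm already knows the response (an
 * invalid IU or an overlapped tag), the status is sent back to the host
 * directly, giving a possibly still-running command with the same tag a
 * short grace period before it is aborted.
 */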
1245 static void usbg_cmd_work(struct work_struct *work)
1247 struct usbg_cmd *cmd = container_of(work, struct usbg_cmd, work);
1250 * Failure is detected by f_tcm here. Skip submitting the command to the
1251 * target core if we already know the failing response, and send the USB
1252 * response to the host directly.
1254 if (cmd->tmr_rsp != RC_RESPONSE_UNKNOWN)
1258 usbg_submit_tmr(cmd);
1260 usbg_submit_cmd(cmd);
1265 if (cmd->tmr_rsp == RC_OVERLAPPED_TAG) {
1266 struct f_uas *fu = cmd->fu;
1267 struct se_session *se_sess;
1268 struct uas_stream *stream = NULL;
1269 struct hlist_node *tmp;
1270 struct usbg_cmd *active_cmd = NULL;
1272 se_sess = cmd->fu->tpg->tpg_nexus->tvn_se_sess;
1274 hash_for_each_possible_safe(fu->stream_hash, stream, tmp, node, cmd->tag) {
1275 int i = stream - &fu->stream[0];
1277 active_cmd = &((struct usbg_cmd *)se_sess->sess_cmd_map)[i];
1278 if (active_cmd->tag == cmd->tag)
1283 if (!stream || (active_cmd && active_cmd->tag != cmd->tag)) {
1284 usbg_submit_command(cmd->fu, cmd->req);
1288 reinit_completion(&stream->cmd_completion);
1291 * A UASP command consists of the command, data, and status
1292 * stages, each operating sequentially from different endpoints.
1294 * Each USB endpoint operates independently, and depending on
1295 * hardware implementation, a completion callback for a transfer
1296 * from one endpoint may not reflect the order of completion on
1297 * the wire. This is particularly true for devices with
1298 * endpoints that have independent interrupts and event buffers.
1300 * The driver must still detect misbehaving hosts and respond
1301 * with an overlap status. To reduce false overlap failures,
1302 * allow the active and matching stream ID a brief 1ms to
1303 * complete before responding with an overlap command failure.
1304 * Overlap failure should be rare.
1306 wait_for_completion_timeout(&stream->cmd_completion, msecs_to_jiffies(1));
1308 /* If the previous stream is completed, retry the command. */
1309 if (!hash_hashed(&stream->node)) {
1310 usbg_submit_command(cmd->fu, cmd->req);
1315 * The command isn't submitted to the target core, so we're safe
1316 * to remove the bitmap index from the session tag pool.
1318 sbitmap_queue_clear(&se_sess->sess_tag_pool,
1319 cmd->se_cmd.map_tag,
1320 cmd->se_cmd.map_cpu);
1323 * Overlapped command tag detected. Cancel any pending transfer of
1324 * the command submitted to the target core.
1326 active_cmd->tmr_rsp = RC_OVERLAPPED_TAG;
1327 usbg_aborted_task(&active_cmd->se_cmd);
1329 /* Send the response after the transfer is aborted. */
1333 uasp_send_tm_response(cmd);
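/*
 * Grab a free tag from the session's tag pool and initialise the
 * corresponding per-command slot in sess_cmd_map.
 */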
1336 static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu,
1337 struct tcm_usbg_nexus *tv_nexus, u32 scsi_tag)
1339 struct se_session *se_sess = tv_nexus->tvn_se_sess;
1340 struct usbg_cmd *cmd;
1343 tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
1345 return ERR_PTR(-ENOMEM);
1347 cmd = &((struct usbg_cmd *)se_sess->sess_cmd_map)[tag];
1348 memset(cmd, 0, sizeof(*cmd));
1349 cmd->se_cmd.map_tag = tag;
1350 cmd->se_cmd.map_cpu = cpu;
1351 cmd->se_cmd.cpuid = cpu;
1352 cmd->se_cmd.tag = cmd->tag = scsi_tag;
1358 static void usbg_release_cmd(struct se_cmd *);
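/*
 * Parse a newly received IU: reject unknown IU types, detect overlapped
 * command tags via the stream hash, copy the CDB and map the UAS task
 * attribute for command IUs, then hand the rest of the processing to
 * usbg_cmd_work() on the TPG workqueue.
 */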
1360 static int usbg_submit_command(struct f_uas *fu, struct usb_request *req)
1362 struct iu *iu = req->buf;
1363 struct usbg_cmd *cmd;
1364 struct usbg_tpg *tpg = fu->tpg;
1365 struct tcm_usbg_nexus *tv_nexus;
1366 struct uas_stream *stream;
1367 struct hlist_node *tmp;
1368 struct command_iu *cmd_iu;
1372 tv_nexus = tpg->tpg_nexus;
1374 pr_err("Missing nexus, ignoring command\n");
1378 scsi_tag = be16_to_cpup(&iu->tag);
1379 cmd = usbg_get_cmd(fu, tv_nexus, scsi_tag);
1381 pr_err("usbg_get_cmd failed\n");
1387 cmd->tag = scsi_tag;
1388 cmd->se_cmd.tag = scsi_tag;
1390 cmd->tmr_rsp = RC_RESPONSE_UNKNOWN;
1393 cmd_iu = (struct command_iu *)iu;
1395 /* Command and Task Management IUs share the same LUN offset */
1396 cmd->unpacked_lun = scsilun_to_int(&cmd_iu->lun);
1398 if (iu->iu_id != IU_ID_COMMAND && iu->iu_id != IU_ID_TASK_MGMT) {
1399 cmd->tmr_rsp = RC_INVALID_INFO_UNIT;
1403 hash_for_each_possible_safe(fu->stream_hash, stream, tmp, node, scsi_tag) {
1404 struct usbg_cmd *active_cmd;
1405 struct se_session *se_sess;
1406 int i = stream - &fu->stream[0];
1408 se_sess = cmd->fu->tpg->tpg_nexus->tvn_se_sess;
1409 active_cmd = &((struct usbg_cmd *)se_sess->sess_cmd_map)[i];
1411 if (active_cmd->tag == scsi_tag) {
1412 cmd->tmr_rsp = RC_OVERLAPPED_TAG;
1417 stream = &fu->stream[cmd->se_cmd.map_tag];
1418 hash_add(fu->stream_hash, &stream->node, scsi_tag);
1420 if (iu->iu_id == IU_ID_TASK_MGMT) {
1421 struct task_mgmt_iu *tm_iu;
1423 tm_iu = (struct task_mgmt_iu *)iu;
1424 cmd->tmr_func = tm_iu->function;
1428 cmd_len = (cmd_iu->len & ~0x3) + 16;
1429 if (cmd_len > USBG_MAX_CMD) {
1430 target_free_tag(tv_nexus->tvn_se_sess, &cmd->se_cmd);
1431 hash_del(&stream->node);
1434 memcpy(cmd->cmd_buf, cmd_iu->cdb, cmd_len);
1436 switch (cmd_iu->prio_attr & 0x7) {
1438 cmd->prio_attr = TCM_HEAD_TAG;
1440 case UAS_ORDERED_TAG:
1441 cmd->prio_attr = TCM_ORDERED_TAG;
1444 cmd->prio_attr = TCM_ACA_TAG;
1447 pr_debug_once("Unsupported prio_attr: %02x.\n",
1450 case UAS_SIMPLE_TAG:
1451 cmd->prio_attr = TCM_SIMPLE_TAG;
1456 INIT_WORK(&cmd->work, usbg_cmd_work);
1457 queue_work(tpg->workqueue, &cmd->work);
1462 static void bot_cmd_work(struct work_struct *work)
1464 struct usbg_cmd *cmd = container_of(work, struct usbg_cmd, work);
1465 struct se_cmd *se_cmd;
1466 struct tcm_usbg_nexus *tv_nexus;
1467 struct usbg_tpg *tpg;
1468 int flags = TARGET_SCF_ACK_KREF;
1472 * Note: each command runs in its own work item, and each stage of the
1473 * command is processed sequentially. Should this no longer be the case,
1474 * locking is needed.
1476 if (cmd->flags & USBG_CMD_PENDING_DATA_WRITE) {
1477 target_execute_cmd(&cmd->se_cmd);
1478 cmd->flags &= ~USBG_CMD_PENDING_DATA_WRITE;
1482 se_cmd = &cmd->se_cmd;
1484 tv_nexus = tpg->tpg_nexus;
1485 dir = get_cmd_dir(cmd->cmd_buf);
1489 target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
1490 cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
1491 cmd->data_len, cmd->prio_attr, dir, flags);
1495 __target_init_cmd(se_cmd,
1496 tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
1497 tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
1498 cmd->prio_attr, cmd->sense_iu.sense,
1499 cmd->unpacked_lun, NULL);
1500 transport_send_check_condition_and_sense(se_cmd,
1501 TCM_UNSUPPORTED_SCSI_OPCODE, 0);
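/*
 * Validate a received CBW (signature, length, CDB size), set up a usbg_cmd
 * with the LUN, transfer direction and length from the wrapper, and queue
 * bot_cmd_work() to submit it to the target core.
 */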
1504 static int bot_submit_command(struct f_uas *fu,
1505 void *cmdbuf, unsigned int len)
1507 struct bulk_cb_wrap *cbw = cmdbuf;
1508 struct usbg_cmd *cmd;
1509 struct usbg_tpg *tpg = fu->tpg;
1510 struct tcm_usbg_nexus *tv_nexus;
1513 if (cbw->Signature != cpu_to_le32(US_BULK_CB_SIGN)) {
1514 pr_err("Wrong signature on CBW\n");
1518 pr_err("Wrong length for CBW\n");
1522 cmd_len = cbw->Length;
1523 if (cmd_len < 1 || cmd_len > 16)
1526 tv_nexus = tpg->tpg_nexus;
1528 pr_err("Missing nexus, ignoring command\n");
1532 cmd = usbg_get_cmd(fu, tv_nexus, cbw->Tag);
1534 pr_err("usbg_get_cmd failed\n");
1537 memcpy(cmd->cmd_buf, cbw->CDB, cmd_len);
1539 cmd->bot_tag = cbw->Tag;
1540 cmd->prio_attr = TCM_SIMPLE_TAG;
1541 cmd->unpacked_lun = cbw->Lun;
1542 cmd->is_read = cbw->Flags & US_BULK_FLAG_IN ? 1 : 0;
1543 cmd->data_len = le32_to_cpu(cbw->DataTransferLength);
1544 cmd->se_cmd.tag = le32_to_cpu(cmd->bot_tag);
1547 INIT_WORK(&cmd->work, bot_cmd_work);
1548 queue_work(tpg->workqueue, &cmd->work);
1553 /* Start fabric.c code */
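/*
 * Illustrative configfs wiring for this fabric (a sketch only: the gadget
 * name, WWN and backstore below are made-up examples, a backstore must
 * already exist under /sys/kernel/config/target/core/, and the usual
 * gadget-side config/UDC setup is omitted):
 *
 *   mkdir /sys/kernel/config/usb_gadget/g1/functions/tcm.0
 *   mkdir -p /sys/kernel/config/target/usb_gadget/naa.0011223344556677/tpgt_1/lun/lun_0
 *   ln -s /sys/kernel/config/target/core/fileio_0/disk0 \
 *       /sys/kernel/config/target/usb_gadget/naa.0011223344556677/tpgt_1/lun/lun_0/virtual_scsi_port
 *   echo naa.0011223344556677 > /sys/kernel/config/target/usb_gadget/naa.0011223344556677/tpgt_1/nexus
 *   echo 1 > /sys/kernel/config/target/usb_gadget/naa.0011223344556677/tpgt_1/enable
 */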
1555 static int usbg_check_true(struct se_portal_group *se_tpg)
1560 static char *usbg_get_fabric_wwn(struct se_portal_group *se_tpg)
1562 struct usbg_tpg *tpg = container_of(se_tpg,
1563 struct usbg_tpg, se_tpg);
1564 struct usbg_tport *tport = tpg->tport;
1566 return &tport->tport_name[0];
1569 static u16 usbg_get_tag(struct se_portal_group *se_tpg)
1571 struct usbg_tpg *tpg = container_of(se_tpg,
1572 struct usbg_tpg, se_tpg);
1573 return tpg->tport_tpgt;
1576 static void usbg_release_cmd(struct se_cmd *se_cmd)
1578 struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
1580 struct se_session *se_sess = se_cmd->se_sess;
1583 kfree(cmd->data_buf);
1584 target_free_tag(se_sess, se_cmd);
1587 static void usbg_queue_tm_rsp(struct se_cmd *se_cmd)
1589 struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd, se_cmd);
1591 uasp_send_tm_response(cmd);
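/*
 * Abort handling: dequeue whichever of the stream's requests (data-out,
 * data-in or status) is still in flight so the aborted command can complete.
 */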
1594 static void usbg_aborted_task(struct se_cmd *se_cmd)
1596 struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd, se_cmd);
1597 struct f_uas *fu = cmd->fu;
1598 struct usb_gadget *gadget = fuas_to_gadget(fu);
1599 struct uas_stream *stream = &fu->stream[se_cmd->map_tag];
1602 if (stream->req_out->status == -EINPROGRESS)
1603 ret = usb_ep_dequeue(fu->ep_out, stream->req_out);
1604 else if (stream->req_in->status == -EINPROGRESS)
1605 ret = usb_ep_dequeue(fu->ep_in, stream->req_in);
1606 else if (stream->req_status->status == -EINPROGRESS)
1607 ret = usb_ep_dequeue(fu->ep_status, stream->req_status);
1610 dev_err(&gadget->dev, "Failed to abort cmd tag %d, (%d)\n",
1613 cmd->state = UASP_QUEUE_COMMAND;
1616 static const char *usbg_check_wwn(const char *name)
1621 n = strstr(name, "naa.");
1626 if (len == 0 || len > USBG_NAMELEN - 1)
1631 static int usbg_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
1633 if (!usbg_check_wwn(name))
1638 static struct se_portal_group *usbg_make_tpg(struct se_wwn *wwn,
1641 struct usbg_tport *tport = container_of(wwn, struct usbg_tport,
1643 struct usbg_tpg *tpg;
1646 struct f_tcm_opts *opts;
1649 if (strstr(name, "tpgt_") != name)
1650 return ERR_PTR(-EINVAL);
1651 if (kstrtoul(name + 5, 0, &tpgt) || tpgt > UINT_MAX)
1652 return ERR_PTR(-EINVAL);
1654 mutex_lock(&tpg_instances_lock);
1655 for (i = 0; i < TPG_INSTANCES; ++i)
1656 if (tpg_instances[i].func_inst && !tpg_instances[i].tpg)
1658 if (i == TPG_INSTANCES)
1661 opts = container_of(tpg_instances[i].func_inst, struct f_tcm_opts,
1663 mutex_lock(&opts->dep_lock);
1667 if (opts->has_dep) {
1668 if (!try_module_get(opts->dependent))
1671 ret = configfs_depend_item_unlocked(
1672 wwn->wwn_group.cg_subsys,
1673 &opts->func_inst.group.cg_item);
1678 tpg = kzalloc(sizeof(struct usbg_tpg), GFP_KERNEL);
1682 mutex_init(&tpg->tpg_mutex);
1683 atomic_set(&tpg->tpg_port_count, 0);
1684 tpg->workqueue = alloc_workqueue("tcm_usb_gadget",
1685 WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
1686 if (!tpg->workqueue)
1690 tpg->tport_tpgt = tpgt;
1693 * SPC doesn't assign a protocol identifier for USB-SCSI, so we
1694 * pretend to be SAS..
1696 ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);
1698 goto free_workqueue;
1700 tpg_instances[i].tpg = tpg;
1701 tpg->fi = tpg_instances[i].func_inst;
1702 mutex_unlock(&opts->dep_lock);
1703 mutex_unlock(&tpg_instances_lock);
1704 return &tpg->se_tpg;
1707 destroy_workqueue(tpg->workqueue);
1712 module_put(opts->dependent);
1714 configfs_undepend_item_unlocked(&opts->func_inst.group.cg_item);
1716 mutex_unlock(&opts->dep_lock);
1718 mutex_unlock(&tpg_instances_lock);
1720 return ERR_PTR(ret);
1723 static int tcm_usbg_drop_nexus(struct usbg_tpg *);
1725 static void usbg_drop_tpg(struct se_portal_group *se_tpg)
1727 struct usbg_tpg *tpg = container_of(se_tpg,
1728 struct usbg_tpg, se_tpg);
1730 struct f_tcm_opts *opts;
1732 tcm_usbg_drop_nexus(tpg);
1733 core_tpg_deregister(se_tpg);
1734 destroy_workqueue(tpg->workqueue);
1736 mutex_lock(&tpg_instances_lock);
1737 for (i = 0; i < TPG_INSTANCES; ++i)
1738 if (tpg_instances[i].tpg == tpg)
1740 if (i < TPG_INSTANCES) {
1741 tpg_instances[i].tpg = NULL;
1742 opts = container_of(tpg_instances[i].func_inst,
1743 struct f_tcm_opts, func_inst);
1744 mutex_lock(&opts->dep_lock);
1746 module_put(opts->dependent);
1748 configfs_undepend_item_unlocked(
1749 &opts->func_inst.group.cg_item);
1750 mutex_unlock(&opts->dep_lock);
1752 mutex_unlock(&tpg_instances_lock);
1757 static struct se_wwn *usbg_make_tport(
1758 struct target_fabric_configfs *tf,
1759 struct config_group *group,
1762 struct usbg_tport *tport;
1763 const char *wnn_name;
1766 wnn_name = usbg_check_wwn(name);
1768 return ERR_PTR(-EINVAL);
1770 tport = kzalloc(sizeof(struct usbg_tport), GFP_KERNEL);
1772 return ERR_PTR(-ENOMEM);
1774 tport->tport_wwpn = wwpn;
1775 snprintf(tport->tport_name, sizeof(tport->tport_name), "%s", wnn_name);
1776 return &tport->tport_wwn;
1779 static void usbg_drop_tport(struct se_wwn *wwn)
1781 struct usbg_tport *tport = container_of(wwn,
1782 struct usbg_tport, tport_wwn);
1787 * If somebody feels like dropping the version property, go ahead.
1789 static ssize_t usbg_wwn_version_show(struct config_item *item, char *page)
1791 return sprintf(page, "usb-gadget fabric module\n");
1794 CONFIGFS_ATTR_RO(usbg_wwn_, version);
1796 static struct configfs_attribute *usbg_wwn_attrs[] = {
1797 &usbg_wwn_attr_version,
1801 static int usbg_attach(struct usbg_tpg *);
1802 static void usbg_detach(struct usbg_tpg *);
1804 static int usbg_enable_tpg(struct se_portal_group *se_tpg, bool enable)
1806 struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
1810 ret = usbg_attach(tpg);
1816 tpg->gadget_connect = enable;
1821 static ssize_t tcm_usbg_tpg_nexus_show(struct config_item *item, char *page)
1823 struct se_portal_group *se_tpg = to_tpg(item);
1824 struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
1825 struct tcm_usbg_nexus *tv_nexus;
1828 mutex_lock(&tpg->tpg_mutex);
1829 tv_nexus = tpg->tpg_nexus;
1834 ret = sysfs_emit(page, "%s\n",
1835 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1837 mutex_unlock(&tpg->tpg_mutex);
1841 static int usbg_alloc_sess_cb(struct se_portal_group *se_tpg,
1842 struct se_session *se_sess, void *p)
1844 struct usbg_tpg *tpg = container_of(se_tpg,
1845 struct usbg_tpg, se_tpg);
1851 static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name)
1853 struct tcm_usbg_nexus *tv_nexus;
1856 mutex_lock(&tpg->tpg_mutex);
1857 if (tpg->tpg_nexus) {
1859 pr_debug("tpg->tpg_nexus already exists\n");
1863 tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
1869 tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg,
1870 USB_G_DEFAULT_SESSION_TAGS,
1871 sizeof(struct usbg_cmd),
1872 TARGET_PROT_NORMAL, name,
1873 tv_nexus, usbg_alloc_sess_cb);
1874 if (IS_ERR(tv_nexus->tvn_se_sess)) {
1875 #define MAKE_NEXUS_MSG "core_tpg_check_initiator_node_acl() failed for %s\n"
1876 pr_debug(MAKE_NEXUS_MSG, name);
1877 #undef MAKE_NEXUS_MSG
1878 ret = PTR_ERR(tv_nexus->tvn_se_sess);
1883 mutex_unlock(&tpg->tpg_mutex);
1887 static int tcm_usbg_drop_nexus(struct usbg_tpg *tpg)
1889 struct se_session *se_sess;
1890 struct tcm_usbg_nexus *tv_nexus;
1893 mutex_lock(&tpg->tpg_mutex);
1894 tv_nexus = tpg->tpg_nexus;
1898 se_sess = tv_nexus->tvn_se_sess;
1902 if (atomic_read(&tpg->tpg_port_count)) {
1904 #define MSG "Unable to remove Host I_T Nexus with active TPG port count: %d\n"
1905 pr_err(MSG, atomic_read(&tpg->tpg_port_count));
1910 pr_debug("Removing I_T Nexus to Initiator Port: %s\n",
1911 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
1913 * Release the SCSI I_T Nexus to the emulated vHost Target Port
1915 target_remove_session(se_sess);
1916 tpg->tpg_nexus = NULL;
1921 mutex_unlock(&tpg->tpg_mutex);
1925 static ssize_t tcm_usbg_tpg_nexus_store(struct config_item *item,
1926 const char *page, size_t count)
1928 struct se_portal_group *se_tpg = to_tpg(item);
1929 struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
1930 unsigned char i_port[USBG_NAMELEN], *ptr;
1933 if (!strncmp(page, "NULL", 4)) {
1934 ret = tcm_usbg_drop_nexus(tpg);
1935 return (!ret) ? count : ret;
1937 if (strlen(page) >= USBG_NAMELEN) {
1939 #define NEXUS_STORE_MSG "Emulated NAA SAS Address: %s, exceeds max: %d\n"
1940 pr_err(NEXUS_STORE_MSG, page, USBG_NAMELEN);
1941 #undef NEXUS_STORE_MSG
1944 snprintf(i_port, USBG_NAMELEN, "%s", page);
1946 ptr = strstr(i_port, "naa.");
1948 pr_err("Missing 'naa.' prefix\n");
1952 if (i_port[strlen(i_port) - 1] == '\n')
1953 i_port[strlen(i_port) - 1] = '\0';
1955 ret = tcm_usbg_make_nexus(tpg, &i_port[0]);
1961 CONFIGFS_ATTR(tcm_usbg_tpg_, nexus);
1963 static struct configfs_attribute *usbg_base_attrs[] = {
1964 &tcm_usbg_tpg_attr_nexus,
1968 static int usbg_port_link(struct se_portal_group *se_tpg, struct se_lun *lun)
1970 struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
1972 atomic_inc(&tpg->tpg_port_count);
1973 smp_mb__after_atomic();
1977 static void usbg_port_unlink(struct se_portal_group *se_tpg,
1978 struct se_lun *se_lun)
1980 struct usbg_tpg *tpg = container_of(se_tpg, struct usbg_tpg, se_tpg);
1982 atomic_dec(&tpg->tpg_port_count);
1983 smp_mb__after_atomic();
1986 static int usbg_check_stop_free(struct se_cmd *se_cmd)
1988 return target_put_sess_cmd(se_cmd);
1991 static const struct target_core_fabric_ops usbg_ops = {
1992 .module = THIS_MODULE,
1993 .fabric_name = "usb_gadget",
1994 .tpg_get_wwn = usbg_get_fabric_wwn,
1995 .tpg_get_tag = usbg_get_tag,
1996 .tpg_check_demo_mode = usbg_check_true,
1997 .release_cmd = usbg_release_cmd,
1998 .sess_get_initiator_sid = NULL,
1999 .write_pending = usbg_send_write_request,
2000 .queue_data_in = usbg_send_read_response,
2001 .queue_status = usbg_send_status_response,
2002 .queue_tm_rsp = usbg_queue_tm_rsp,
2003 .aborted_task = usbg_aborted_task,
2004 .check_stop_free = usbg_check_stop_free,
2006 .fabric_make_wwn = usbg_make_tport,
2007 .fabric_drop_wwn = usbg_drop_tport,
2008 .fabric_make_tpg = usbg_make_tpg,
2009 .fabric_enable_tpg = usbg_enable_tpg,
2010 .fabric_drop_tpg = usbg_drop_tpg,
2011 .fabric_post_link = usbg_port_link,
2012 .fabric_pre_unlink = usbg_port_unlink,
2013 .fabric_init_nodeacl = usbg_init_nodeacl,
2015 .tfc_wwn_attrs = usbg_wwn_attrs,
2016 .tfc_tpg_base_attrs = usbg_base_attrs,
2018 .default_submit_type = TARGET_DIRECT_SUBMIT,
2019 .direct_submit_supp = 1,
2022 /* Start gadget.c code */
2024 static struct usb_interface_descriptor bot_intf_desc = {
2025 .bLength = sizeof(bot_intf_desc),
2026 .bDescriptorType = USB_DT_INTERFACE,
2028 .bAlternateSetting = USB_G_ALT_INT_BBB,
2029 .bInterfaceClass = USB_CLASS_MASS_STORAGE,
2030 .bInterfaceSubClass = USB_SC_SCSI,
2031 .bInterfaceProtocol = USB_PR_BULK,
2034 static struct usb_interface_descriptor uasp_intf_desc = {
2035 .bLength = sizeof(uasp_intf_desc),
2036 .bDescriptorType = USB_DT_INTERFACE,
2038 .bAlternateSetting = USB_G_ALT_INT_UAS,
2039 .bInterfaceClass = USB_CLASS_MASS_STORAGE,
2040 .bInterfaceSubClass = USB_SC_SCSI,
2041 .bInterfaceProtocol = USB_PR_UAS,
2044 static struct usb_endpoint_descriptor uasp_bi_desc = {
2045 .bLength = USB_DT_ENDPOINT_SIZE,
2046 .bDescriptorType = USB_DT_ENDPOINT,
2047 .bEndpointAddress = USB_DIR_IN,
2048 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2049 .wMaxPacketSize = cpu_to_le16(512),
2052 static struct usb_endpoint_descriptor uasp_fs_bi_desc = {
2053 .bLength = USB_DT_ENDPOINT_SIZE,
2054 .bDescriptorType = USB_DT_ENDPOINT,
2055 .bEndpointAddress = USB_DIR_IN,
2056 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2059 static struct usb_pipe_usage_descriptor uasp_bi_pipe_desc = {
2060 .bLength = sizeof(uasp_bi_pipe_desc),
2061 .bDescriptorType = USB_DT_PIPE_USAGE,
2062 .bPipeID = DATA_IN_PIPE_ID,
2065 static struct usb_endpoint_descriptor uasp_ss_bi_desc = {
2066 .bLength = USB_DT_ENDPOINT_SIZE,
2067 .bDescriptorType = USB_DT_ENDPOINT,
2068 .bEndpointAddress = USB_DIR_IN,
2069 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2070 .wMaxPacketSize = cpu_to_le16(1024),
2073 static struct usb_ss_ep_comp_descriptor uasp_bi_ep_comp_desc = {
2074 .bLength = sizeof(uasp_bi_ep_comp_desc),
2075 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
2077 .bmAttributes = UASP_SS_EP_COMP_LOG_STREAMS,
2078 .wBytesPerInterval = 0,
2081 static struct usb_ss_ep_comp_descriptor bot_bi_ep_comp_desc = {
2082 .bLength = sizeof(bot_bi_ep_comp_desc),
2083 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
2087 static struct usb_endpoint_descriptor uasp_bo_desc = {
2088 .bLength = USB_DT_ENDPOINT_SIZE,
2089 .bDescriptorType = USB_DT_ENDPOINT,
2090 .bEndpointAddress = USB_DIR_OUT,
2091 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2092 .wMaxPacketSize = cpu_to_le16(512),
2095 static struct usb_endpoint_descriptor uasp_fs_bo_desc = {
2096 .bLength = USB_DT_ENDPOINT_SIZE,
2097 .bDescriptorType = USB_DT_ENDPOINT,
2098 .bEndpointAddress = USB_DIR_OUT,
2099 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2102 static struct usb_pipe_usage_descriptor uasp_bo_pipe_desc = {
2103 .bLength = sizeof(uasp_bo_pipe_desc),
2104 .bDescriptorType = USB_DT_PIPE_USAGE,
2105 .bPipeID = DATA_OUT_PIPE_ID,
2108 static struct usb_endpoint_descriptor uasp_ss_bo_desc = {
2109 .bLength = USB_DT_ENDPOINT_SIZE,
2110 .bDescriptorType = USB_DT_ENDPOINT,
2111 .bEndpointAddress = USB_DIR_OUT,
2112 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2113 .wMaxPacketSize = cpu_to_le16(0x400),
2116 static struct usb_ss_ep_comp_descriptor uasp_bo_ep_comp_desc = {
2117 .bLength = sizeof(uasp_bo_ep_comp_desc),
2118 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
2120 .bmAttributes = UASP_SS_EP_COMP_LOG_STREAMS,
2123 static struct usb_ss_ep_comp_descriptor bot_bo_ep_comp_desc = {
2124 .bLength = sizeof(bot_bo_ep_comp_desc),
2125 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
2129 static struct usb_endpoint_descriptor uasp_status_desc = {
2130 .bLength = USB_DT_ENDPOINT_SIZE,
2131 .bDescriptorType = USB_DT_ENDPOINT,
2132 .bEndpointAddress = USB_DIR_IN,
2133 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2134 .wMaxPacketSize = cpu_to_le16(512),
2137 static struct usb_endpoint_descriptor uasp_fs_status_desc = {
2138 .bLength = USB_DT_ENDPOINT_SIZE,
2139 .bDescriptorType = USB_DT_ENDPOINT,
2140 .bEndpointAddress = USB_DIR_IN,
2141 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2144 static struct usb_pipe_usage_descriptor uasp_status_pipe_desc = {
2145 .bLength = sizeof(uasp_status_pipe_desc),
2146 .bDescriptorType = USB_DT_PIPE_USAGE,
2147 .bPipeID = STATUS_PIPE_ID,
2150 static struct usb_endpoint_descriptor uasp_ss_status_desc = {
2151 .bLength = USB_DT_ENDPOINT_SIZE,
2152 .bDescriptorType = USB_DT_ENDPOINT,
2153 .bEndpointAddress = USB_DIR_IN,
2154 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2155 .wMaxPacketSize = cpu_to_le16(1024),
2158 static struct usb_ss_ep_comp_descriptor uasp_status_in_ep_comp_desc = {
2159 .bLength = sizeof(uasp_status_in_ep_comp_desc),
2160 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
2161 .bmAttributes = UASP_SS_EP_COMP_LOG_STREAMS,
2164 static struct usb_endpoint_descriptor uasp_cmd_desc = {
2165 .bLength = USB_DT_ENDPOINT_SIZE,
2166 .bDescriptorType = USB_DT_ENDPOINT,
2167 .bEndpointAddress = USB_DIR_OUT,
2168 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2169 .wMaxPacketSize = cpu_to_le16(512),
2172 static struct usb_endpoint_descriptor uasp_fs_cmd_desc = {
2173 .bLength = USB_DT_ENDPOINT_SIZE,
2174 .bDescriptorType = USB_DT_ENDPOINT,
2175 .bEndpointAddress = USB_DIR_OUT,
2176 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2179 static struct usb_pipe_usage_descriptor uasp_cmd_pipe_desc = {
2180 .bLength = sizeof(uasp_cmd_pipe_desc),
2181 .bDescriptorType = USB_DT_PIPE_USAGE,
2182 .bPipeID = CMD_PIPE_ID,
2185 static struct usb_endpoint_descriptor uasp_ss_cmd_desc = {
2186 .bLength = USB_DT_ENDPOINT_SIZE,
2187 .bDescriptorType = USB_DT_ENDPOINT,
2188 .bEndpointAddress = USB_DIR_OUT,
2189 .bmAttributes = USB_ENDPOINT_XFER_BULK,
2190 .wMaxPacketSize = cpu_to_le16(1024),
2193 static struct usb_ss_ep_comp_descriptor uasp_cmd_comp_desc = {
2194 .bLength = sizeof(uasp_cmd_comp_desc),
2195 .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
2198 static struct usb_descriptor_header *uasp_fs_function_desc[] = {
2199 (struct usb_descriptor_header *) &bot_intf_desc,
2200 (struct usb_descriptor_header *) &uasp_fs_bi_desc,
2201 (struct usb_descriptor_header *) &uasp_fs_bo_desc,
2203 (struct usb_descriptor_header *) &uasp_intf_desc,
2204 (struct usb_descriptor_header *) &uasp_fs_bi_desc,
2205 (struct usb_descriptor_header *) &uasp_bi_pipe_desc,
2206 (struct usb_descriptor_header *) &uasp_fs_bo_desc,
2207 (struct usb_descriptor_header *) &uasp_bo_pipe_desc,
2208 (struct usb_descriptor_header *) &uasp_fs_status_desc,
2209 (struct usb_descriptor_header *) &uasp_status_pipe_desc,
2210 (struct usb_descriptor_header *) &uasp_fs_cmd_desc,
2211 (struct usb_descriptor_header *) &uasp_cmd_pipe_desc,
2215 static struct usb_descriptor_header *uasp_hs_function_desc[] = {
2216 (struct usb_descriptor_header *) &bot_intf_desc,
2217 (struct usb_descriptor_header *) &uasp_bi_desc,
2218 (struct usb_descriptor_header *) &uasp_bo_desc,
2220 (struct usb_descriptor_header *) &uasp_intf_desc,
2221 (struct usb_descriptor_header *) &uasp_bi_desc,
2222 (struct usb_descriptor_header *) &uasp_bi_pipe_desc,
2223 (struct usb_descriptor_header *) &uasp_bo_desc,
2224 (struct usb_descriptor_header *) &uasp_bo_pipe_desc,
2225 (struct usb_descriptor_header *) &uasp_status_desc,
2226 (struct usb_descriptor_header *) &uasp_status_pipe_desc,
2227 (struct usb_descriptor_header *) &uasp_cmd_desc,
2228 (struct usb_descriptor_header *) &uasp_cmd_pipe_desc,
2232 static struct usb_descriptor_header *uasp_ss_function_desc[] = {
2233 (struct usb_descriptor_header *) &bot_intf_desc,
2234 (struct usb_descriptor_header *) &uasp_ss_bi_desc,
2235 (struct usb_descriptor_header *) &bot_bi_ep_comp_desc,
2236 (struct usb_descriptor_header *) &uasp_ss_bo_desc,
2237 (struct usb_descriptor_header *) &bot_bo_ep_comp_desc,
2239 (struct usb_descriptor_header *) &uasp_intf_desc,
2240 (struct usb_descriptor_header *) &uasp_ss_bi_desc,
2241 (struct usb_descriptor_header *) &uasp_bi_ep_comp_desc,
2242 (struct usb_descriptor_header *) &uasp_bi_pipe_desc,
2243 (struct usb_descriptor_header *) &uasp_ss_bo_desc,
2244 (struct usb_descriptor_header *) &uasp_bo_ep_comp_desc,
2245 (struct usb_descriptor_header *) &uasp_bo_pipe_desc,
2246 (struct usb_descriptor_header *) &uasp_ss_status_desc,
2247 (struct usb_descriptor_header *) &uasp_status_in_ep_comp_desc,
2248 (struct usb_descriptor_header *) &uasp_status_pipe_desc,
2249 (struct usb_descriptor_header *) &uasp_ss_cmd_desc,
2250 (struct usb_descriptor_header *) &uasp_cmd_comp_desc,
2251 (struct usb_descriptor_header *) &uasp_cmd_pipe_desc,
2255 static struct usb_string tcm_us_strings[] = {
2256 [USB_G_STR_INT_UAS].s = "USB Attached SCSI",
2257 [USB_G_STR_INT_BBB].s = "Bulk Only Transport",
2261 static struct usb_gadget_strings tcm_stringtab = {
2263 .strings = tcm_us_strings,
2266 static struct usb_gadget_strings *tcm_strings[] = {
2271 static int tcm_bind(struct usb_configuration *c, struct usb_function *f)
2273 struct f_uas *fu = to_f_uas(f);
2274 struct usb_string *us;
2275 struct usb_gadget *gadget = c->cdev->gadget;
2277 struct f_tcm_opts *opts;
2281 opts = container_of(f->fi, struct f_tcm_opts, func_inst);
2283 mutex_lock(&opts->dep_lock);
2284 if (!opts->can_attach) {
2285 mutex_unlock(&opts->dep_lock);
2288 mutex_unlock(&opts->dep_lock);
2289 us = usb_gstrings_attach(c->cdev, tcm_strings,
2290 ARRAY_SIZE(tcm_us_strings));
2293 bot_intf_desc.iInterface = us[USB_G_STR_INT_BBB].id;
2294 uasp_intf_desc.iInterface = us[USB_G_STR_INT_UAS].id;
2296 iface = usb_interface_id(c, f);
2300 bot_intf_desc.bInterfaceNumber = iface;
2301 uasp_intf_desc.bInterfaceNumber = iface;
2303 ep = usb_ep_autoconfig(gadget, &uasp_fs_bi_desc);
2309 ep = usb_ep_autoconfig(gadget, &uasp_fs_bo_desc);
2314 ep = usb_ep_autoconfig(gadget, &uasp_fs_status_desc);
2319 ep = usb_ep_autoconfig(gadget, &uasp_fs_cmd_desc);
2324 /* Assume endpoint addresses are the same for all speeds */
2325 uasp_bi_desc.bEndpointAddress = uasp_fs_bi_desc.bEndpointAddress;
2326 uasp_bo_desc.bEndpointAddress = uasp_fs_bo_desc.bEndpointAddress;
2327 uasp_status_desc.bEndpointAddress =
2328 uasp_fs_status_desc.bEndpointAddress;
2329 uasp_cmd_desc.bEndpointAddress = uasp_fs_cmd_desc.bEndpointAddress;
2331 uasp_ss_bi_desc.bEndpointAddress = uasp_fs_bi_desc.bEndpointAddress;
2332 uasp_ss_bo_desc.bEndpointAddress = uasp_fs_bo_desc.bEndpointAddress;
2333 uasp_ss_status_desc.bEndpointAddress =
2334 uasp_fs_status_desc.bEndpointAddress;
2335 uasp_ss_cmd_desc.bEndpointAddress = uasp_fs_cmd_desc.bEndpointAddress;
2337 ret = usb_assign_descriptors(f, uasp_fs_function_desc,
2338 uasp_hs_function_desc, uasp_ss_function_desc,
2339 uasp_ss_function_desc);
2345 pr_err("Can't claim all required eps\n");
2350 struct guas_setup_wq {
2351 struct work_struct work;
2356 static void tcm_delayed_set_alt(struct work_struct *wq)
2358 struct guas_setup_wq *work = container_of(wq, struct guas_setup_wq,
2360 struct f_uas *fu = work->fu;
2361 int alt = work->alt;
2365 if (fu->flags & USBG_IS_BOT)
2366 bot_cleanup_old_alt(fu);
2367 if (fu->flags & USBG_IS_UAS)
2368 uasp_cleanup_old_alt(fu);
2370 if (alt == USB_G_ALT_INT_BBB)
2372 else if (alt == USB_G_ALT_INT_UAS)
2374 usb_composite_setup_continue(fu->function.config->cdev);
2377 static int tcm_get_alt(struct usb_function *f, unsigned intf)
2379 struct f_uas *fu = to_f_uas(f);
2381 if (fu->iface != intf)
2384 if (fu->flags & USBG_IS_BOT)
2385 return USB_G_ALT_INT_BBB;
2386 else if (fu->flags & USBG_IS_UAS)
2387 return USB_G_ALT_INT_UAS;
2392 static int tcm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
2394 struct f_uas *fu = to_f_uas(f);
2396 if (fu->iface != intf)
2399 if ((alt == USB_G_ALT_INT_BBB) || (alt == USB_G_ALT_INT_UAS)) {
2400 struct guas_setup_wq *work;
2402 work = kmalloc(sizeof(*work), GFP_ATOMIC);
2405 INIT_WORK(&work->work, tcm_delayed_set_alt);
2408 schedule_work(&work->work);
2409 return USB_GADGET_DELAYED_STATUS;
2414 static void tcm_disable(struct usb_function *f)
2416 struct f_uas *fu = to_f_uas(f);
2418 if (fu->flags & USBG_IS_UAS)
2419 uasp_cleanup_old_alt(fu);
2420 else if (fu->flags & USBG_IS_BOT)
2421 bot_cleanup_old_alt(fu);
2425 static int tcm_setup(struct usb_function *f,
2426 const struct usb_ctrlrequest *ctrl)
2428 struct f_uas *fu = to_f_uas(f);
2430 if (!(fu->flags & USBG_IS_BOT))
2433 return usbg_bot_setup(f, ctrl);
2436 static inline struct f_tcm_opts *to_f_tcm_opts(struct config_item *item)
2438 return container_of(to_config_group(item), struct f_tcm_opts,
2442 static void tcm_attr_release(struct config_item *item)
2444 struct f_tcm_opts *opts = to_f_tcm_opts(item);
2446 usb_put_function_instance(&opts->func_inst);
2449 static struct configfs_item_operations tcm_item_ops = {
2450 .release = tcm_attr_release,
2453 static const struct config_item_type tcm_func_type = {
2454 .ct_item_ops = &tcm_item_ops,
2455 .ct_owner = THIS_MODULE,
2458 static void tcm_free_inst(struct usb_function_instance *f)
2460 struct f_tcm_opts *opts;
2463 opts = container_of(f, struct f_tcm_opts, func_inst);
2465 mutex_lock(&tpg_instances_lock);
2466 for (i = 0; i < TPG_INSTANCES; ++i)
2467 if (tpg_instances[i].func_inst == f)
2469 if (i < TPG_INSTANCES)
2470 tpg_instances[i].func_inst = NULL;
2471 mutex_unlock(&tpg_instances_lock);
2476 static int tcm_register_callback(struct usb_function_instance *f)
2478 struct f_tcm_opts *opts = container_of(f, struct f_tcm_opts, func_inst);
2480 mutex_lock(&opts->dep_lock);
2481 opts->can_attach = true;
2482 mutex_unlock(&opts->dep_lock);
2487 static void tcm_unregister_callback(struct usb_function_instance *f)
2489 struct f_tcm_opts *opts = container_of(f, struct f_tcm_opts, func_inst);
2491 mutex_lock(&opts->dep_lock);
2492 unregister_gadget_item(opts->
2493 func_inst.group.cg_item.ci_parent->ci_parent);
2494 opts->can_attach = false;
2495 mutex_unlock(&opts->dep_lock);
2498 static int usbg_attach(struct usbg_tpg *tpg)
2500 struct usb_function_instance *f = tpg->fi;
2501 struct f_tcm_opts *opts = container_of(f, struct f_tcm_opts, func_inst);
2503 if (opts->tcm_register_callback)
2504 return opts->tcm_register_callback(f);
2509 static void usbg_detach(struct usbg_tpg *tpg)
2511 struct usb_function_instance *f = tpg->fi;
2512 struct f_tcm_opts *opts = container_of(f, struct f_tcm_opts, func_inst);
2514 if (opts->tcm_unregister_callback)
2515 opts->tcm_unregister_callback(f);
2518 static int tcm_set_name(struct usb_function_instance *f, const char *name)
2520 struct f_tcm_opts *opts = container_of(f, struct f_tcm_opts, func_inst);
2522 pr_debug("tcm: Activating %s\n", name);
2524 mutex_lock(&opts->dep_lock);
2526 mutex_unlock(&opts->dep_lock);
2531 static struct usb_function_instance *tcm_alloc_inst(void)
2533 struct f_tcm_opts *opts;
2537 opts = kzalloc(sizeof(*opts), GFP_KERNEL);
2539 return ERR_PTR(-ENOMEM);
2541 mutex_lock(&tpg_instances_lock);
2542 for (i = 0; i < TPG_INSTANCES; ++i)
2543 if (!tpg_instances[i].func_inst)
2546 if (i == TPG_INSTANCES) {
2547 mutex_unlock(&tpg_instances_lock);
2549 return ERR_PTR(-EBUSY);
2551 tpg_instances[i].func_inst = &opts->func_inst;
2552 mutex_unlock(&tpg_instances_lock);
2554 mutex_init(&opts->dep_lock);
2555 opts->func_inst.set_inst_name = tcm_set_name;
2556 opts->func_inst.free_func_inst = tcm_free_inst;
2557 opts->tcm_register_callback = tcm_register_callback;
2558 opts->tcm_unregister_callback = tcm_unregister_callback;
2560 config_group_init_type_name(&opts->func_inst.group, "",
2563 return &opts->func_inst;
2566 static void tcm_free(struct usb_function *f)
2568 struct f_uas *tcm = to_f_uas(f);
2573 static void tcm_unbind(struct usb_configuration *c, struct usb_function *f)
2575 usb_free_all_descriptors(f);
2578 static struct usb_function *tcm_alloc(struct usb_function_instance *fi)
2583 mutex_lock(&tpg_instances_lock);
2584 for (i = 0; i < TPG_INSTANCES; ++i)
2585 if (tpg_instances[i].func_inst == fi)
2587 if (i == TPG_INSTANCES) {
2588 mutex_unlock(&tpg_instances_lock);
2589 return ERR_PTR(-ENODEV);
2592 fu = kzalloc(sizeof(*fu), GFP_KERNEL);
2594 mutex_unlock(&tpg_instances_lock);
2595 return ERR_PTR(-ENOMEM);
2598 fu->function.name = "Target Function";
2599 fu->function.bind = tcm_bind;
2600 fu->function.unbind = tcm_unbind;
2601 fu->function.set_alt = tcm_set_alt;
2602 fu->function.get_alt = tcm_get_alt;
2603 fu->function.setup = tcm_setup;
2604 fu->function.disable = tcm_disable;
2605 fu->function.free_func = tcm_free;
2606 fu->tpg = tpg_instances[i].tpg;
2608 hash_init(fu->stream_hash);
2609 mutex_unlock(&tpg_instances_lock);
2611 return &fu->function;
2614 DECLARE_USB_FUNCTION(tcm, tcm_alloc_inst, tcm_alloc);
2616 static int __init tcm_init(void)
2620 ret = usb_function_register(&tcmusb_func);
2624 ret = target_register_template(&usbg_ops);
2626 usb_function_unregister(&tcmusb_func);
2630 module_init(tcm_init);
2632 static void __exit tcm_exit(void)
2634 target_unregister_template(&usbg_ops);
2635 usb_function_unregister(&tcmusb_func);
2637 module_exit(tcm_exit);
2639 MODULE_DESCRIPTION("Target based USB-Gadget");
2640 MODULE_LICENSE("GPL");
2641 MODULE_AUTHOR("Sebastian Andrzej Siewior");