1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2015-2017 Google, Inc
5 * USB Power Delivery protocol stack.
8 #include <linux/completion.h>
9 #include <linux/debugfs.h>
10 #include <linux/device.h>
11 #include <linux/hrtimer.h>
12 #include <linux/jiffies.h>
13 #include <linux/kernel.h>
14 #include <linux/kthread.h>
15 #include <linux/module.h>
16 #include <linux/mutex.h>
17 #include <linux/power_supply.h>
18 #include <linux/proc_fs.h>
19 #include <linux/property.h>
20 #include <linux/sched/clock.h>
21 #include <linux/seq_file.h>
22 #include <linux/slab.h>
23 #include <linux/spinlock.h>
24 #include <linux/usb.h>
25 #include <linux/usb/pd.h>
26 #include <linux/usb/pd_ado.h>
27 #include <linux/usb/pd_bdo.h>
28 #include <linux/usb/pd_ext_sdb.h>
29 #include <linux/usb/pd_vdo.h>
30 #include <linux/usb/role.h>
31 #include <linux/usb/tcpm.h>
32 #include <linux/usb/typec_altmode.h>
34 #include <uapi/linux/sched/types.h>
36 #define FOREACH_STATE(S) \
39 S(CHECK_CONTAMINANT), \
44 S(SRC_SEND_CAPABILITIES), \
45 S(SRC_SEND_CAPABILITIES_TIMEOUT), \
46 S(SRC_NEGOTIATE_CAPABILITIES), \
47 S(SRC_TRANSITION_SUPPLY), \
49 S(SRC_WAIT_NEW_CAPABILITIES), \
57 S(SNK_DISCOVERY_DEBOUNCE), \
58 S(SNK_DISCOVERY_DEBOUNCE_DONE), \
59 S(SNK_WAIT_CAPABILITIES), \
60 S(SNK_WAIT_CAPABILITIES_TIMEOUT), \
61 S(SNK_NEGOTIATE_CAPABILITIES), \
62 S(SNK_NEGOTIATE_PPS_CAPABILITIES), \
63 S(SNK_TRANSITION_SINK), \
64 S(SNK_TRANSITION_SINK_VBUS), \
68 S(DEBUG_ACC_ATTACHED), \
69 S(AUDIO_ACC_ATTACHED), \
70 S(AUDIO_ACC_DEBOUNCE), \
73 S(HARD_RESET_START), \
74 S(SRC_HARD_RESET_VBUS_OFF), \
75 S(SRC_HARD_RESET_VBUS_ON), \
76 S(SNK_HARD_RESET_SINK_OFF), \
77 S(SNK_HARD_RESET_WAIT_VBUS), \
78 S(SNK_HARD_RESET_SINK_ON), \
81 S(SRC_SOFT_RESET_WAIT_SNK_TX), \
87 S(DR_SWAP_SEND_TIMEOUT), \
89 S(DR_SWAP_CHANGE_DR), \
93 S(PR_SWAP_SEND_TIMEOUT), \
96 S(PR_SWAP_SRC_SNK_TRANSITION_OFF), \
97 S(PR_SWAP_SRC_SNK_SOURCE_OFF), \
98 S(PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED), \
99 S(PR_SWAP_SRC_SNK_SINK_ON), \
100 S(PR_SWAP_SNK_SRC_SINK_OFF), \
101 S(PR_SWAP_SNK_SRC_SOURCE_ON), \
102 S(PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP), \
104 S(VCONN_SWAP_ACCEPT), \
105 S(VCONN_SWAP_SEND), \
106 S(VCONN_SWAP_SEND_TIMEOUT), \
107 S(VCONN_SWAP_CANCEL), \
108 S(VCONN_SWAP_START), \
109 S(VCONN_SWAP_WAIT_FOR_VCONN), \
110 S(VCONN_SWAP_TURN_ON_VCONN), \
111 S(VCONN_SWAP_TURN_OFF_VCONN), \
112 S(VCONN_SWAP_SEND_SOFT_RESET), \
115 S(FR_SWAP_SEND_TIMEOUT), \
116 S(FR_SWAP_SNK_SRC_TRANSITION_TO_OFF), \
117 S(FR_SWAP_SNK_SRC_NEW_SINK_READY), \
118 S(FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED), \
123 S(SNK_TRY_WAIT_DEBOUNCE), \
124 S(SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS), \
126 S(SRC_TRYWAIT_DEBOUNCE), \
127 S(SRC_TRYWAIT_UNATTACHED), \
131 S(SRC_TRY_DEBOUNCE), \
133 S(SNK_TRYWAIT_DEBOUNCE), \
134 S(SNK_TRYWAIT_VBUS), \
137 S(GET_STATUS_SEND), \
138 S(GET_STATUS_SEND_TIMEOUT), \
139 S(GET_PPS_STATUS_SEND), \
140 S(GET_PPS_STATUS_SEND_TIMEOUT), \
143 S(GET_SINK_CAP_TIMEOUT), \
147 S(PORT_RESET_WAIT_OFF), \
152 S(SRC_VDM_IDENTITY_REQUEST)
154 #define FOREACH_AMS(S) \
156 S(POWER_NEGOTIATION), \
161 S(GET_SOURCE_CAPABILITIES), \
162 S(GET_SINK_CAPABILITIES), \
163 S(POWER_ROLE_SWAP), \
168 S(GETTING_SOURCE_EXTENDED_CAPABILITIES),\
169 S(GETTING_SOURCE_SINK_STATUS), \
170 S(GETTING_BATTERY_CAPABILITIES), \
171 S(GETTING_BATTERY_STATUS), \
172 S(GETTING_MANUFACTURER_INFORMATION), \
174 S(FIRMWARE_UPDATE), \
175 S(DISCOVER_IDENTITY), \
176 S(SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY), \
179 S(DFP_TO_UFP_ENTER_MODE), \
180 S(DFP_TO_UFP_EXIT_MODE), \
181 S(DFP_TO_CABLE_PLUG_ENTER_MODE), \
182 S(DFP_TO_CABLE_PLUG_EXIT_MODE), \
185 S(UNSTRUCTURED_VDMS), \
186 S(STRUCTURED_VDMS), \
190 #define GENERATE_ENUM(e) e
191 #define GENERATE_STRING(s) #s
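/*
 * FOREACH_STATE()/FOREACH_AMS() follow the classic X-macro pattern: the same
 * list is expanded once with GENERATE_ENUM to build the enum and once with
 * GENERATE_STRING to build the matching name table, so the two can never
 * drift apart. For example, S(SRC_SEND_CAPABILITIES) becomes the enumerator
 * SRC_SEND_CAPABILITIES in one expansion and the string
 * "SRC_SEND_CAPABILITIES" in the other.
 */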
194 FOREACH_STATE(GENERATE_ENUM)
197 static const char * const tcpm_states[] = {
198 FOREACH_STATE(GENERATE_STRING)
202 FOREACH_AMS(GENERATE_ENUM)
205 static const char * const tcpm_ams_str[] = {
206 FOREACH_AMS(GENERATE_STRING)
210 VDM_STATE_ERR_BUSY = -3,
211 VDM_STATE_ERR_SEND = -2,
212 VDM_STATE_ERR_TMOUT = -1,
214 /* Anything >0 represents an active state */
217 VDM_STATE_WAIT_RSP_BUSY = 3,
218 VDM_STATE_SEND_MESSAGE = 4,
221 enum pd_msg_request {
225 PD_MSG_CTRL_NOT_SUPP,
226 PD_MSG_DATA_SINK_CAP,
227 PD_MSG_DATA_SOURCE_CAP,
232 ADEV_NOTIFY_USB_AND_QUEUE_VDM,
234 ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL,
239 * Initial current capability of the new source when vSafe5V is applied during PD3.0 Fast Role Swap.
240 * Based on "Table 6-14 Fixed Supply PDO - Sink" of "USB Power Delivery Specification Revision 3.0,
243 enum frs_typec_current {
250 /* Events from low level driver */
252 #define TCPM_CC_EVENT BIT(0)
253 #define TCPM_VBUS_EVENT BIT(1)
254 #define TCPM_RESET_EVENT BIT(2)
255 #define TCPM_FRS_EVENT BIT(3)
256 #define TCPM_SOURCING_VBUS BIT(4)
257 #define TCPM_PORT_CLEAN BIT(5)
258 #define TCPM_PORT_ERROR BIT(6)
260 #define LOG_BUFFER_ENTRIES 1024
261 #define LOG_BUFFER_ENTRY_SIZE 128
263 /* Alternate mode support */
265 #define SVID_DISCOVERY_MAX 16
266 #define ALTMODE_DISCOVERY_MAX (SVID_DISCOVERY_MAX * MODE_DISCOVERY_MAX)
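/*
 * Worst case for discovery: each of the SVID_DISCOVERY_MAX SVIDs exposes
 * MODE_DISCOVERY_MAX alternate modes, so the altmode tables below are sized
 * for the product of the two.
 */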
268 #define GET_SINK_CAP_RETRY_MS 100
269 #define SEND_DISCOVER_RETRY_MS 100
271 struct pd_mode_data {
272 int svid_index; /* current SVID index */
274 u16 svids[SVID_DISCOVERY_MAX];
275 int altmodes; /* number of alternate modes */
276 struct typec_altmode_desc altmode_desc[ALTMODE_DISCOVERY_MAX];
280 * @min_volt: Actual min voltage at the local port
281 * @req_min_volt: Requested min voltage to the port partner
282 * @max_volt: Actual max voltage at the local port
283 * @req_max_volt: Requested max voltage to the port partner
284 * @max_curr: Actual max current at the local port
285 * @req_max_curr: Requested max current of the port partner
286 * @req_out_volt: Requested output voltage to the port partner
287 * @req_op_curr: Requested operating current to the port partner
 * @supported: Partner has at least one APDO, hence supports PPS
289 * @active: PPS mode is active
305 struct usb_power_delivery *pd;
306 struct usb_power_delivery_capabilities *source_cap;
307 struct usb_power_delivery_capabilities_desc source_desc;
308 struct usb_power_delivery_capabilities *sink_cap;
309 struct usb_power_delivery_capabilities_desc sink_desc;
310 unsigned int operating_snk_mw;
314 * @sink_wait_cap_time: Deadline (in ms) for tTypeCSinkWaitCap timer
315 * @ps_src_wait_off_time: Deadline (in ms) for tPSSourceOff timer
316 * @cc_debounce_time: Deadline (in ms) for tCCDebounce timer
319 u32 sink_wait_cap_time;
321 u32 cc_debounce_time;
322 u32 snk_bc12_cmpletion_time;
328 struct mutex lock; /* tcpm state machine lock */
329 struct kthread_worker *wq;
331 struct typec_capability typec_caps;
332 struct typec_port *typec_port;
334 struct tcpc_dev *tcpc;
335 struct usb_role_switch *role_sw;
337 enum typec_role vconn_role;
338 enum typec_role pwr_role;
339 enum typec_data_role data_role;
340 enum typec_pwr_opmode pwr_opmode;
342 struct usb_pd_identity partner_ident;
343 struct typec_partner_desc partner_desc;
344 struct typec_partner *partner;
346 struct usb_pd_identity cable_ident;
347 struct typec_cable_desc cable_desc;
348 struct typec_cable *cable;
349 struct typec_plug_desc plug_prime_desc;
350 struct typec_plug *plug_prime;
352 enum typec_cc_status cc_req;
	enum typec_cc_status src_rp;	/* used only if pd_supported == false */
355 enum typec_cc_status cc1;
356 enum typec_cc_status cc2;
357 enum typec_cc_polarity polarity;
363 enum typec_port_type port_type;
366 * Set to true when vbus is greater than VSAFE5V min.
367 * Set to false when vbus falls below vSinkDisconnect max threshold.
372 * Set to true when vbus is less than VSAFE0V max.
373 * Set to false when vbus is greater than VSAFE0V max.
381 /* Set to true when Discover_Identity Command is expected to be sent in Ready states. */
389 enum pd_msg_request queued_message;
391 enum tcpm_state enter_state;
392 enum tcpm_state prev_state;
393 enum tcpm_state state;
394 enum tcpm_state delayed_state;
395 ktime_t delayed_runtime;
396 unsigned long delay_ms;
398 spinlock_t pd_event_lock;
401 struct kthread_work event_work;
402 struct hrtimer state_machine_timer;
403 struct kthread_work state_machine;
404 struct hrtimer vdm_state_machine_timer;
405 struct kthread_work vdm_state_machine;
406 struct hrtimer enable_frs_timer;
407 struct kthread_work enable_frs;
408 struct hrtimer send_discover_timer;
409 struct kthread_work send_discover_work;
410 bool state_machine_running;
	/* Set to true when the VDM state machine has pending actions. */
414 struct completion tx_complete;
415 enum tcpm_transmit_status tx_status;
417 struct mutex swap_lock; /* swap command lock */
419 bool non_pd_role_swap;
420 struct completion swap_complete;
423 unsigned int negotiated_rev;
424 unsigned int message_id;
425 unsigned int caps_count;
426 unsigned int hard_reset_count;
428 bool explicit_contract;
429 unsigned int rx_msgid;
432 struct usb_power_delivery **pds;
433 struct pd_data **pd_list;
434 struct usb_power_delivery_capabilities *port_source_caps;
435 struct usb_power_delivery_capabilities *port_sink_caps;
436 struct usb_power_delivery *partner_pd;
437 struct usb_power_delivery_capabilities *partner_source_caps;
438 struct usb_power_delivery_capabilities *partner_sink_caps;
439 struct usb_power_delivery *selected_pd;
441 /* Partner capabilities/requests */
443 u32 source_caps[PDO_MAX_OBJECTS];
444 unsigned int nr_source_caps;
445 u32 sink_caps[PDO_MAX_OBJECTS];
446 unsigned int nr_sink_caps;
448 /* Local capabilities */
449 unsigned int pd_count;
450 u32 src_pdo[PDO_MAX_OBJECTS];
451 unsigned int nr_src_pdo;
452 u32 snk_pdo[PDO_MAX_OBJECTS];
453 unsigned int nr_snk_pdo;
454 u32 snk_vdo_v1[VDO_MAX_OBJECTS];
455 unsigned int nr_snk_vdo_v1;
456 u32 snk_vdo[VDO_MAX_OBJECTS];
457 unsigned int nr_snk_vdo;
459 unsigned int operating_snk_mw;
460 bool update_sink_caps;
462 /* Requested current / voltage to the port partner */
463 u32 req_current_limit;
464 u32 req_supply_voltage;
465 /* Actual current / voltage limit of the local port */
469 /* Used to export TA voltage and current */
470 struct power_supply *psy;
471 struct power_supply_desc psy_desc;
472 enum power_supply_usb_type usb_type;
476 /* PD state for Vendor Defined Messages */
477 enum vdm_states vdm_state;
479 /* next Vendor Defined Message to send */
480 u32 vdo_data[VDO_MAX_SIZE];
482 /* VDO to retry if UFP responder replied busy */
486 struct pd_pps_data pps_data;
487 struct completion pps_complete;
491 /* Alternate mode data */
492 struct pd_mode_data mode_data;
493 struct pd_mode_data mode_data_prime;
494 struct typec_altmode *partner_altmode[ALTMODE_DISCOVERY_MAX];
495 struct typec_altmode *plug_prime_altmode[ALTMODE_DISCOVERY_MAX];
496 struct typec_altmode *port_altmode[ALTMODE_DISCOVERY_MAX];
498 /* Deadline in jiffies to exit src_try_wait state */
499 unsigned long max_wait;
501 /* port belongs to a self powered device */
505 enum frs_typec_current new_source_frs_current;
507 /* Sink caps have been queried */
510 /* Collision Avoidance and Atomic Message Sequence */
511 enum tcpm_state upcoming_state;
513 enum tcpm_ams next_ams;
516 /* Auto vbus discharge status */
517 bool auto_vbus_discharge_enabled;
	 * When set, the port requests PD_P_SNK_STDBY_MW upon entering SNK_DISCOVERY and
	 * the actual current limit after RX of PD_CTRL_PS_RDY for a PD link, or upon
	 * SNK_READY for a non-PD link.
524 bool slow_charger_loop;
	 * When true, the lower-level drivers have indicated the potential presence
	 * of contaminant in the connector pins, based on the tcpm state machine behavior.
531 bool potential_contaminant;
533 /* SOP* Related Fields */
535 * Flag to determine if SOP' Discover Identity is available. The flag
536 * is set if Discover Identity on SOP' does not immediately follow
537 * Discover Identity on SOP.
539 bool send_discover_prime;
541 * tx_sop_type determines which SOP* a message is being sent on.
542 * For messages that are queued and not sent immediately such as in
543 * tcpm_queue_message or messages that send after state changes,
544 * the tx_sop_type is set accordingly.
546 enum tcpm_transmit_type tx_sop_type;
548 * Prior to discovering the port partner's Specification Revision, the
549 * Vconn source and cable plug will use the lower of their two revisions.
551 * When the port partner's Specification Revision is discovered, the following
552 * rules are put in place.
553 * 1. If the cable revision (1) is lower than the revision negotiated
554 * between the port and partner (2), the port and partner will communicate
555 * on revision (2), but the port and cable will communicate on revision (1).
556 * 2. If the cable revision (1) is higher than the revision negotiated
557 * between the port and partner (2), the port and partner will communicate
558 * on revision (2), and the port and cable will communicate on revision (2)
561 unsigned int negotiated_rev_prime;
563 * Each SOP* type must maintain their own tx and rx message IDs
565 unsigned int message_id_prime;
566 unsigned int rx_msgid_prime;
568 /* Timer deadline values configured at runtime */
569 struct pd_timings timings;
570 #ifdef CONFIG_DEBUG_FS
571 struct dentry *dentry;
572 struct mutex logbuffer_lock; /* log buffer access lock */
575 u8 *logbuffer[LOG_BUFFER_ENTRIES];
580 struct kthread_work work;
581 struct tcpm_port *port;
582 struct pd_message msg;
583 enum tcpm_transmit_type rx_sop_type;
586 static const char * const pd_rev[] = {
592 #define tcpm_cc_is_sink(cc) \
593 ((cc) == TYPEC_CC_RP_DEF || (cc) == TYPEC_CC_RP_1_5 || \
594 (cc) == TYPEC_CC_RP_3_0)
/* As long as cc is pulled up, we can consider it a sink. */
597 #define tcpm_port_is_sink(port) \
598 (tcpm_cc_is_sink((port)->cc1) || tcpm_cc_is_sink((port)->cc2))
600 #define tcpm_cc_is_source(cc) ((cc) == TYPEC_CC_RD)
601 #define tcpm_cc_is_audio(cc) ((cc) == TYPEC_CC_RA)
602 #define tcpm_cc_is_open(cc) ((cc) == TYPEC_CC_OPEN)
604 #define tcpm_port_is_source(port) \
605 ((tcpm_cc_is_source((port)->cc1) && \
606 !tcpm_cc_is_source((port)->cc2)) || \
607 (tcpm_cc_is_source((port)->cc2) && \
608 !tcpm_cc_is_source((port)->cc1)))
610 #define tcpm_port_is_debug(port) \
611 (tcpm_cc_is_source((port)->cc1) && tcpm_cc_is_source((port)->cc2))
613 #define tcpm_port_is_audio(port) \
614 (tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_audio((port)->cc2))
616 #define tcpm_port_is_audio_detached(port) \
617 ((tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_open((port)->cc2)) || \
618 (tcpm_cc_is_audio((port)->cc2) && tcpm_cc_is_open((port)->cc1)))
620 #define tcpm_try_snk(port) \
621 ((port)->try_snk_count == 0 && (port)->try_role == TYPEC_SINK && \
622 (port)->port_type == TYPEC_PORT_DRP)
624 #define tcpm_try_src(port) \
625 ((port)->try_src_count == 0 && (port)->try_role == TYPEC_SOURCE && \
626 (port)->port_type == TYPEC_PORT_DRP)
628 #define tcpm_data_role_for_source(port) \
629 ((port)->typec_caps.data == TYPEC_PORT_UFP ? \
630 TYPEC_DEVICE : TYPEC_HOST)
632 #define tcpm_data_role_for_sink(port) \
633 ((port)->typec_caps.data == TYPEC_PORT_DFP ? \
634 TYPEC_HOST : TYPEC_DEVICE)
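/*
 * Default data role paired with each power role: a source comes up as host
 * and a sink as device, unless the port's data capability is fixed to the
 * opposite (TYPEC_PORT_UFP forces device, TYPEC_PORT_DFP forces host).
 */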
636 #define tcpm_sink_tx_ok(port) \
637 (tcpm_port_is_sink(port) && \
638 ((port)->cc1 == TYPEC_CC_RP_3_0 || (port)->cc2 == TYPEC_CC_RP_3_0))
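/*
 * PD3.0 collision avoidance: a source advertises Rp 3.0A (SinkTxOk) when the
 * sink may initiate an AMS, and drops to Rp 1.5A (SinkTxNG) while the source
 * has an AMS of its own pending. tcpm_sink_tx_ok() is therefore the sink-side
 * check "am I currently allowed to start an AMS?".
 */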
640 #define tcpm_wait_for_discharge(port) \
641 (((port)->auto_vbus_discharge_enabled && !(port)->vbus_vsafe0v) ? PD_T_SAFE_0V : 0)
643 static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
645 if (port->port_type == TYPEC_PORT_DRP) {
646 if (port->try_role == TYPEC_SINK)
647 return SNK_UNATTACHED;
648 else if (port->try_role == TYPEC_SOURCE)
649 return SRC_UNATTACHED;
650 /* Fall through to return SRC_UNATTACHED */
651 } else if (port->port_type == TYPEC_PORT_SNK) {
652 return SNK_UNATTACHED;
654 return SRC_UNATTACHED;
657 static bool tcpm_port_is_disconnected(struct tcpm_port *port)
659 return (!port->attached && port->cc1 == TYPEC_CC_OPEN &&
660 port->cc2 == TYPEC_CC_OPEN) ||
661 (port->attached && ((port->polarity == TYPEC_POLARITY_CC1 &&
662 port->cc1 == TYPEC_CC_OPEN) ||
663 (port->polarity == TYPEC_POLARITY_CC2 &&
664 port->cc2 == TYPEC_CC_OPEN)));
671 #ifdef CONFIG_DEBUG_FS
673 static bool tcpm_log_full(struct tcpm_port *port)
675 return port->logbuffer_tail ==
676 (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
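/*
 * The log is a fixed-size ring of LOG_BUFFER_ENTRIES lines; it is "full" when
 * advancing the head would catch up with the tail. E.g. with head == 10 and
 * tail == 11 the next write would overwrite the oldest unread entry, so
 * tcpm_log_full() returns true.
 */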
680 static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args)
682 char tmpbuffer[LOG_BUFFER_ENTRY_SIZE];
683 u64 ts_nsec = local_clock();
684 unsigned long rem_nsec;
686 mutex_lock(&port->logbuffer_lock);
687 if (!port->logbuffer[port->logbuffer_head]) {
688 port->logbuffer[port->logbuffer_head] =
689 kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL);
690 if (!port->logbuffer[port->logbuffer_head]) {
691 mutex_unlock(&port->logbuffer_lock);
696 vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args);
698 if (tcpm_log_full(port)) {
699 port->logbuffer_head = max(port->logbuffer_head - 1, 0);
700 strcpy(tmpbuffer, "overflow");
703 if (port->logbuffer_head < 0 ||
704 port->logbuffer_head >= LOG_BUFFER_ENTRIES) {
706 "Bad log buffer index %d\n", port->logbuffer_head);
710 if (!port->logbuffer[port->logbuffer_head]) {
712 "Log buffer index %d is NULL\n", port->logbuffer_head);
716 rem_nsec = do_div(ts_nsec, 1000000000);
717 scnprintf(port->logbuffer[port->logbuffer_head],
718 LOG_BUFFER_ENTRY_SIZE, "[%5lu.%06lu] %s",
719 (unsigned long)ts_nsec, rem_nsec / 1000,
721 port->logbuffer_head = (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
724 mutex_unlock(&port->logbuffer_lock);
728 static void tcpm_log(struct tcpm_port *port, const char *fmt, ...)
732 /* Do not log while disconnected and unattached */
733 if (tcpm_port_is_disconnected(port) &&
734 (port->state == SRC_UNATTACHED || port->state == SNK_UNATTACHED ||
735 port->state == TOGGLING || port->state == CHECK_CONTAMINANT))
739 _tcpm_log(port, fmt, args);
744 static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...)
749 _tcpm_log(port, fmt, args);
753 static void tcpm_log_source_caps(struct tcpm_port *port)
757 for (i = 0; i < port->nr_source_caps; i++) {
758 u32 pdo = port->source_caps[i];
759 enum pd_pdo_type type = pdo_type(pdo);
764 scnprintf(msg, sizeof(msg),
765 "%u mV, %u mA [%s%s%s%s%s%s]",
766 pdo_fixed_voltage(pdo),
767 pdo_max_current(pdo),
768 (pdo & PDO_FIXED_DUAL_ROLE) ?
770 (pdo & PDO_FIXED_SUSPEND) ?
772 (pdo & PDO_FIXED_HIGHER_CAP) ?
774 (pdo & PDO_FIXED_USB_COMM) ?
776 (pdo & PDO_FIXED_DATA_SWAP) ?
778 (pdo & PDO_FIXED_EXTPOWER) ?
782 scnprintf(msg, sizeof(msg),
784 pdo_min_voltage(pdo),
785 pdo_max_voltage(pdo),
786 pdo_max_current(pdo));
789 scnprintf(msg, sizeof(msg),
791 pdo_min_voltage(pdo),
792 pdo_max_voltage(pdo),
796 if (pdo_apdo_type(pdo) == APDO_TYPE_PPS)
797 scnprintf(msg, sizeof(msg),
799 pdo_pps_apdo_min_voltage(pdo),
800 pdo_pps_apdo_max_voltage(pdo),
801 pdo_pps_apdo_max_current(pdo));
803 strcpy(msg, "undefined APDO");
806 strcpy(msg, "undefined");
809 tcpm_log(port, " PDO %d: type %d, %s",
814 static int tcpm_debug_show(struct seq_file *s, void *v)
816 struct tcpm_port *port = s->private;
819 mutex_lock(&port->logbuffer_lock);
820 tail = port->logbuffer_tail;
821 while (tail != port->logbuffer_head) {
822 seq_printf(s, "%s\n", port->logbuffer[tail]);
823 tail = (tail + 1) % LOG_BUFFER_ENTRIES;
825 if (!seq_has_overflowed(s))
826 port->logbuffer_tail = tail;
827 mutex_unlock(&port->logbuffer_lock);
831 DEFINE_SHOW_ATTRIBUTE(tcpm_debug);
833 static void tcpm_debugfs_init(struct tcpm_port *port)
837 mutex_init(&port->logbuffer_lock);
838 snprintf(name, NAME_MAX, "tcpm-%s", dev_name(port->dev));
839 port->dentry = debugfs_create_dir(name, usb_debug_root);
840 debugfs_create_file("log", S_IFREG | 0444, port->dentry, port,
844 static void tcpm_debugfs_exit(struct tcpm_port *port)
848 mutex_lock(&port->logbuffer_lock);
849 for (i = 0; i < LOG_BUFFER_ENTRIES; i++) {
850 kfree(port->logbuffer[i]);
851 port->logbuffer[i] = NULL;
853 mutex_unlock(&port->logbuffer_lock);
855 debugfs_remove(port->dentry);
861 static void tcpm_log(const struct tcpm_port *port, const char *fmt, ...) { }
863 static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...) { }
864 static void tcpm_log_source_caps(struct tcpm_port *port) { }
865 static void tcpm_debugfs_init(const struct tcpm_port *port) { }
866 static void tcpm_debugfs_exit(const struct tcpm_port *port) { }
870 static void tcpm_set_cc(struct tcpm_port *port, enum typec_cc_status cc)
872 tcpm_log(port, "cc:=%d", cc);
874 port->tcpc->set_cc(port->tcpc, cc);
877 static int tcpm_enable_auto_vbus_discharge(struct tcpm_port *port, bool enable)
881 if (port->tcpc->enable_auto_vbus_discharge) {
882 ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, enable);
883 tcpm_log_force(port, "%s vbus discharge ret:%d", enable ? "enable" : "disable",
886 port->auto_vbus_discharge_enabled = enable;
892 static void tcpm_apply_rc(struct tcpm_port *port)
895 * TCPCI: Move to APPLY_RC state to prevent disconnect during PR_SWAP
896 * when Vbus auto discharge on disconnect is enabled.
898 if (port->tcpc->enable_auto_vbus_discharge && port->tcpc->apply_rc) {
899 tcpm_log(port, "Apply_RC");
900 port->tcpc->apply_rc(port->tcpc, port->cc_req, port->polarity);
901 tcpm_enable_auto_vbus_discharge(port, false);
906 * Determine RP value to set based on maximum current supported
907 * by a port if configured as source.
908 * Returns CC value to report to link partner.
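 *
 * For example (illustrative values): a port whose 5V fixed PDO is rated
 * 3.0A advertises Rp 3.0A, a 5V/1.5A PDO maps to Rp 1.5A, and anything
 * lower maps to the default Rp.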
910 static enum typec_cc_status tcpm_rp_cc(struct tcpm_port *port)
912 const u32 *src_pdo = port->src_pdo;
913 int nr_pdo = port->nr_src_pdo;
916 if (!port->pd_supported)
920 * Search for first entry with matching voltage.
921 * It should report the maximum supported current.
923 for (i = 0; i < nr_pdo; i++) {
924 const u32 pdo = src_pdo[i];
926 if (pdo_type(pdo) == PDO_TYPE_FIXED &&
927 pdo_fixed_voltage(pdo) == 5000) {
928 unsigned int curr = pdo_max_current(pdo);
931 return TYPEC_CC_RP_3_0;
932 else if (curr >= 1500)
933 return TYPEC_CC_RP_1_5;
934 return TYPEC_CC_RP_DEF;
938 return TYPEC_CC_RP_DEF;
941 static void tcpm_ams_finish(struct tcpm_port *port)
943 tcpm_log(port, "AMS %s finished", tcpm_ams_str[port->ams]);
945 if (port->pd_capable && port->pwr_role == TYPEC_SOURCE) {
946 if (port->negotiated_rev >= PD_REV30)
947 tcpm_set_cc(port, SINK_TX_OK);
949 tcpm_set_cc(port, SINK_TX_NG);
950 } else if (port->pwr_role == TYPEC_SOURCE) {
951 tcpm_set_cc(port, tcpm_rp_cc(port));
954 port->in_ams = false;
955 port->ams = NONE_AMS;
958 static int tcpm_pd_transmit(struct tcpm_port *port,
959 enum tcpm_transmit_type tx_sop_type,
960 const struct pd_message *msg)
962 unsigned long time_left;
964 unsigned int negotiated_rev;
966 switch (tx_sop_type) {
967 case TCPC_TX_SOP_PRIME:
968 negotiated_rev = port->negotiated_rev_prime;
972 negotiated_rev = port->negotiated_rev;
977 tcpm_log(port, "PD TX, header: %#x", le16_to_cpu(msg->header));
979 tcpm_log(port, "PD TX, type: %#x", tx_sop_type);
981 reinit_completion(&port->tx_complete);
982 ret = port->tcpc->pd_transmit(port->tcpc, tx_sop_type, msg, negotiated_rev);
986 mutex_unlock(&port->lock);
987 time_left = wait_for_completion_timeout(&port->tx_complete,
988 msecs_to_jiffies(PD_T_TCPC_TX_TIMEOUT));
989 mutex_lock(&port->lock);
993 switch (port->tx_status) {
994 case TCPC_TX_SUCCESS:
995 switch (tx_sop_type) {
996 case TCPC_TX_SOP_PRIME:
997 port->message_id_prime = (port->message_id_prime + 1) &
1002 port->message_id = (port->message_id + 1) &
1007 * USB PD rev 2.0, 8.3.2.2.1:
1008 * USB PD rev 3.0, 8.3.2.1.3:
1009 * "... Note that every AMS is Interruptible until the first
1010 * Message in the sequence has been successfully sent (GoodCRC
1011 * Message received)."
1013 if (port->ams != NONE_AMS)
1014 port->in_ams = true;
1016 case TCPC_TX_DISCARDED:
1019 case TCPC_TX_FAILED:
1025 /* Some AMS don't expect responses. Finish them here. */
1026 if (port->ams == ATTENTION || port->ams == SOURCE_ALERT)
1027 tcpm_ams_finish(port);
1032 void tcpm_pd_transmit_complete(struct tcpm_port *port,
1033 enum tcpm_transmit_status status)
1035 tcpm_log(port, "PD TX complete, status: %u", status);
1036 port->tx_status = status;
1037 complete(&port->tx_complete);
1039 EXPORT_SYMBOL_GPL(tcpm_pd_transmit_complete);
1041 static int tcpm_mux_set(struct tcpm_port *port, int state,
1042 enum usb_role usb_role,
1043 enum typec_orientation orientation)
1047 tcpm_log(port, "Requesting mux state %d, usb-role %d, orientation %d",
1048 state, usb_role, orientation);
1050 ret = typec_set_orientation(port->typec_port, orientation);
1054 if (port->role_sw) {
1055 ret = usb_role_switch_set_role(port->role_sw, usb_role);
1060 return typec_set_mode(port->typec_port, state);
1063 static int tcpm_set_polarity(struct tcpm_port *port,
1064 enum typec_cc_polarity polarity)
1068 tcpm_log(port, "polarity %d", polarity);
1070 ret = port->tcpc->set_polarity(port->tcpc, polarity);
1074 port->polarity = polarity;
1079 static int tcpm_set_vconn(struct tcpm_port *port, bool enable)
1083 tcpm_log(port, "vconn:=%d", enable);
1085 ret = port->tcpc->set_vconn(port->tcpc, enable);
1087 port->vconn_role = enable ? TYPEC_SOURCE : TYPEC_SINK;
1088 typec_set_vconn_role(port->typec_port, port->vconn_role);
1094 static u32 tcpm_get_current_limit(struct tcpm_port *port)
1096 enum typec_cc_status cc;
1099 cc = port->polarity ? port->cc2 : port->cc1;
1101 case TYPEC_CC_RP_1_5:
1104 case TYPEC_CC_RP_3_0:
1107 case TYPEC_CC_RP_DEF:
1109 if (port->tcpc->get_current_limit)
1110 limit = port->tcpc->get_current_limit(port->tcpc);
1119 static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
1121 int ret = -EOPNOTSUPP;
1123 tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma);
1125 port->supply_voltage = mv;
1126 port->current_limit = max_ma;
1127 power_supply_changed(port->psy);
1129 if (port->tcpc->set_current_limit)
1130 ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv);
1135 static int tcpm_set_attached_state(struct tcpm_port *port, bool attached)
1137 return port->tcpc->set_roles(port->tcpc, attached, port->pwr_role,
1141 static int tcpm_set_roles(struct tcpm_port *port, bool attached,
1142 enum typec_role role, enum typec_data_role data)
1144 enum typec_orientation orientation;
1145 enum usb_role usb_role;
1148 if (port->polarity == TYPEC_POLARITY_CC1)
1149 orientation = TYPEC_ORIENTATION_NORMAL;
1151 orientation = TYPEC_ORIENTATION_REVERSE;
1153 if (port->typec_caps.data == TYPEC_PORT_DRD) {
1154 if (data == TYPEC_HOST)
1155 usb_role = USB_ROLE_HOST;
1157 usb_role = USB_ROLE_DEVICE;
1158 } else if (port->typec_caps.data == TYPEC_PORT_DFP) {
1159 if (data == TYPEC_HOST) {
1160 if (role == TYPEC_SOURCE)
1161 usb_role = USB_ROLE_HOST;
1163 usb_role = USB_ROLE_NONE;
1168 if (data == TYPEC_DEVICE) {
1169 if (role == TYPEC_SINK)
1170 usb_role = USB_ROLE_DEVICE;
1172 usb_role = USB_ROLE_NONE;
1178 ret = tcpm_mux_set(port, TYPEC_STATE_USB, usb_role, orientation);
1182 ret = port->tcpc->set_roles(port->tcpc, attached, role, data);
1186 if (port->tcpc->set_orientation) {
1187 ret = port->tcpc->set_orientation(port->tcpc, orientation);
1192 port->pwr_role = role;
1193 port->data_role = data;
1194 typec_set_data_role(port->typec_port, data);
1195 typec_set_pwr_role(port->typec_port, role);
1200 static int tcpm_set_pwr_role(struct tcpm_port *port, enum typec_role role)
1204 ret = port->tcpc->set_roles(port->tcpc, true, role,
1209 port->pwr_role = role;
1210 typec_set_pwr_role(port->typec_port, role);
1216 * Transform the PDO to be compliant to PD rev2.0.
1217 * Return 0 if the PDO type is not defined in PD rev2.0.
1218 * Otherwise, return the converted PDO.
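 *
 * For instance, a fixed sink PDO has its PD3.0-only Fast Role Swap current
 * bits cleared, and a fixed source PDO loses the unchunked extended messages
 * bit, before being sent to a PD2.0 partner.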
1220 static u32 tcpm_forge_legacy_pdo(struct tcpm_port *port, u32 pdo, enum typec_role role)
1222 switch (pdo_type(pdo)) {
1223 case PDO_TYPE_FIXED:
1224 if (role == TYPEC_SINK)
1225 return pdo & ~PDO_FIXED_FRS_CURR_MASK;
1227 return pdo & ~PDO_FIXED_UNCHUNK_EXT;
1237 static int tcpm_pd_send_source_caps(struct tcpm_port *port)
1239 struct pd_message msg;
1241 unsigned int i, nr_pdo = 0;
1243 memset(&msg, 0, sizeof(msg));
1245 for (i = 0; i < port->nr_src_pdo; i++) {
1246 if (port->negotiated_rev >= PD_REV30) {
1247 msg.payload[nr_pdo++] = cpu_to_le32(port->src_pdo[i]);
1249 pdo = tcpm_forge_legacy_pdo(port, port->src_pdo[i], TYPEC_SOURCE);
1251 msg.payload[nr_pdo++] = cpu_to_le32(pdo);
1256 /* No source capabilities defined, sink only */
1257 msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
1260 port->negotiated_rev,
1261 port->message_id, 0);
1263 msg.header = PD_HEADER_LE(PD_DATA_SOURCE_CAP,
1266 port->negotiated_rev,
1271 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1274 static int tcpm_pd_send_sink_caps(struct tcpm_port *port)
1276 struct pd_message msg;
1278 unsigned int i, nr_pdo = 0;
1280 memset(&msg, 0, sizeof(msg));
1282 for (i = 0; i < port->nr_snk_pdo; i++) {
1283 if (port->negotiated_rev >= PD_REV30) {
1284 msg.payload[nr_pdo++] = cpu_to_le32(port->snk_pdo[i]);
1286 pdo = tcpm_forge_legacy_pdo(port, port->snk_pdo[i], TYPEC_SINK);
1288 msg.payload[nr_pdo++] = cpu_to_le32(pdo);
1293 /* No sink capabilities defined, source only */
1294 msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
1297 port->negotiated_rev,
1298 port->message_id, 0);
1300 msg.header = PD_HEADER_LE(PD_DATA_SINK_CAP,
1303 port->negotiated_rev,
1308 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1311 static void mod_tcpm_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1314 hrtimer_start(&port->state_machine_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
1316 hrtimer_cancel(&port->state_machine_timer);
1317 kthread_queue_work(port->wq, &port->state_machine);
1321 static void mod_vdm_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1324 hrtimer_start(&port->vdm_state_machine_timer, ms_to_ktime(delay_ms),
1327 hrtimer_cancel(&port->vdm_state_machine_timer);
1328 kthread_queue_work(port->wq, &port->vdm_state_machine);
1332 static void mod_enable_frs_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1335 hrtimer_start(&port->enable_frs_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
1337 hrtimer_cancel(&port->enable_frs_timer);
1338 kthread_queue_work(port->wq, &port->enable_frs);
1342 static void mod_send_discover_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
1345 hrtimer_start(&port->send_discover_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
1347 hrtimer_cancel(&port->send_discover_timer);
1348 kthread_queue_work(port->wq, &port->send_discover_work);
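/*
 * The mod_*_delayed_work() helpers above share one pattern: a non-zero delay
 * (re)arms the corresponding hrtimer, while a zero delay cancels any pending
 * timer and queues the kthread work immediately, e.g.
 * mod_tcpm_delayed_work(port, 0) makes the state machine run as soon as the
 * worker gets scheduled.
 */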
1352 static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
1353 unsigned int delay_ms)
1356 tcpm_log(port, "pending state change %s -> %s @ %u ms [%s %s]",
1357 tcpm_states[port->state], tcpm_states[state], delay_ms,
1358 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
1359 port->delayed_state = state;
1360 mod_tcpm_delayed_work(port, delay_ms);
1361 port->delayed_runtime = ktime_add(ktime_get(), ms_to_ktime(delay_ms));
1362 port->delay_ms = delay_ms;
1364 tcpm_log(port, "state change %s -> %s [%s %s]",
1365 tcpm_states[port->state], tcpm_states[state],
1366 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
1367 port->delayed_state = INVALID_STATE;
1368 port->prev_state = port->state;
1369 port->state = state;
1371 * Don't re-queue the state machine work item if we're currently
1372 * in the state machine and we're immediately changing states.
1373 * tcpm_state_machine_work() will continue running the state
1376 if (!port->state_machine_running)
1377 mod_tcpm_delayed_work(port, 0);
1381 static void tcpm_set_state_cond(struct tcpm_port *port, enum tcpm_state state,
1382 unsigned int delay_ms)
1384 if (port->enter_state == port->state)
1385 tcpm_set_state(port, state, delay_ms);
1388 "skipped %sstate change %s -> %s [%u ms], context state %s [%s %s]",
1389 delay_ms ? "delayed " : "",
1390 tcpm_states[port->state], tcpm_states[state],
1391 delay_ms, tcpm_states[port->enter_state],
1392 pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
1395 static void tcpm_queue_message(struct tcpm_port *port,
1396 enum pd_msg_request message)
1398 port->queued_message = message;
1399 mod_tcpm_delayed_work(port, 0);
1402 static bool tcpm_vdm_ams(struct tcpm_port *port)
1404 switch (port->ams) {
1405 case DISCOVER_IDENTITY:
1406 case SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY:
1407 case DISCOVER_SVIDS:
1408 case DISCOVER_MODES:
1409 case DFP_TO_UFP_ENTER_MODE:
1410 case DFP_TO_UFP_EXIT_MODE:
1411 case DFP_TO_CABLE_PLUG_ENTER_MODE:
1412 case DFP_TO_CABLE_PLUG_EXIT_MODE:
1414 case UNSTRUCTURED_VDMS:
1415 case STRUCTURED_VDMS:
1424 static bool tcpm_ams_interruptible(struct tcpm_port *port)
1426 switch (port->ams) {
1427 /* Interruptible AMS */
1430 case FIRMWARE_UPDATE:
1431 case DISCOVER_IDENTITY:
1432 case SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY:
1433 case DISCOVER_SVIDS:
1434 case DISCOVER_MODES:
1435 case DFP_TO_UFP_ENTER_MODE:
1436 case DFP_TO_UFP_EXIT_MODE:
1437 case DFP_TO_CABLE_PLUG_ENTER_MODE:
1438 case DFP_TO_CABLE_PLUG_EXIT_MODE:
1439 case UNSTRUCTURED_VDMS:
1440 case STRUCTURED_VDMS:
1444 /* Non-Interruptible AMS */
1454 static int tcpm_ams_start(struct tcpm_port *port, enum tcpm_ams ams)
1458 tcpm_log(port, "AMS %s start", tcpm_ams_str[ams]);
1460 if (!tcpm_ams_interruptible(port) &&
1461 !(ams == HARD_RESET || ams == SOFT_RESET_AMS)) {
1462 port->upcoming_state = INVALID_STATE;
1463 tcpm_log(port, "AMS %s not interruptible, aborting",
1464 tcpm_ams_str[port->ams]);
1468 if (port->pwr_role == TYPEC_SOURCE) {
1469 enum typec_cc_status cc_req = port->cc_req;
1473 if (ams == HARD_RESET) {
1474 tcpm_set_cc(port, tcpm_rp_cc(port));
1475 tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
1476 tcpm_set_state(port, HARD_RESET_START, 0);
1478 } else if (ams == SOFT_RESET_AMS) {
1479 if (!port->explicit_contract)
1480 tcpm_set_cc(port, tcpm_rp_cc(port));
1481 tcpm_set_state(port, SOFT_RESET_SEND, 0);
1483 } else if (tcpm_vdm_ams(port)) {
1484 /* tSinkTx is enforced in vdm_run_state_machine */
1485 if (port->negotiated_rev >= PD_REV30)
1486 tcpm_set_cc(port, SINK_TX_NG);
1490 if (port->negotiated_rev >= PD_REV30)
1491 tcpm_set_cc(port, SINK_TX_NG);
1493 switch (port->state) {
1496 case SRC_SOFT_RESET_WAIT_SNK_TX:
1498 case SOFT_RESET_SEND:
1499 if (port->negotiated_rev >= PD_REV30)
1500 tcpm_set_state(port, AMS_START,
1501 cc_req == SINK_TX_OK ?
1504 tcpm_set_state(port, AMS_START, 0);
1507 if (port->negotiated_rev >= PD_REV30)
1508 tcpm_set_state(port, SRC_READY,
1509 cc_req == SINK_TX_OK ?
1512 tcpm_set_state(port, SRC_READY, 0);
1516 if (port->negotiated_rev >= PD_REV30 &&
1517 !tcpm_sink_tx_ok(port) &&
1518 ams != SOFT_RESET_AMS &&
1519 ams != HARD_RESET) {
1520 port->upcoming_state = INVALID_STATE;
1521 tcpm_log(port, "Sink TX No Go");
1527 if (ams == HARD_RESET) {
1528 tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
1529 tcpm_set_state(port, HARD_RESET_START, 0);
1531 } else if (tcpm_vdm_ams(port)) {
1535 if (port->state == SNK_READY ||
1536 port->state == SNK_SOFT_RESET)
1537 tcpm_set_state(port, AMS_START, 0);
1539 tcpm_set_state(port, SNK_READY, 0);
1546 * VDM/VDO handling functions
1548 static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
1549 const u32 *data, int cnt, enum tcpm_transmit_type tx_sop_type)
1551 u32 vdo_hdr = port->vdo_data[0];
1553 WARN_ON(!mutex_is_locked(&port->lock));
1555 /* If is sending discover_identity, handle received message first */
1556 if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMD(vdo_hdr) == CMD_DISCOVER_IDENT) {
1557 if (tx_sop_type == TCPC_TX_SOP_PRIME)
1558 port->send_discover_prime = true;
1560 port->send_discover = true;
1561 mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
1563 /* Make sure we are not still processing a previous VDM packet */
1564 WARN_ON(port->vdm_state > VDM_STATE_DONE);
1567 port->vdo_count = cnt + 1;
1568 port->vdo_data[0] = header;
1569 memcpy(&port->vdo_data[1], data, sizeof(u32) * cnt);
1570 /* Set ready, vdm state machine will actually send */
1571 port->vdm_retries = 0;
1572 port->vdm_state = VDM_STATE_READY;
1573 port->vdm_sm_running = true;
1575 port->tx_sop_type = tx_sop_type;
1577 mod_vdm_delayed_work(port, 0);
1580 static void tcpm_queue_vdm_unlocked(struct tcpm_port *port, const u32 header,
1581 const u32 *data, int cnt, enum tcpm_transmit_type tx_sop_type)
1583 if (port->state != SRC_READY && port->state != SNK_READY &&
1584 port->state != SRC_VDM_IDENTITY_REQUEST)
1587 mutex_lock(&port->lock);
1588 tcpm_queue_vdm(port, header, data, cnt, tx_sop_type);
1589 mutex_unlock(&port->lock);
1592 static void svdm_consume_identity(struct tcpm_port *port, const u32 *p, int cnt)
1594 u32 vdo = p[VDO_INDEX_IDH];
1595 u32 product = p[VDO_INDEX_PRODUCT];
1597 memset(&port->mode_data, 0, sizeof(port->mode_data));
1599 port->partner_ident.id_header = vdo;
1600 port->partner_ident.cert_stat = p[VDO_INDEX_CSTAT];
1601 port->partner_ident.product = product;
1604 typec_partner_set_identity(port->partner);
1606 tcpm_log(port, "Identity: %04x:%04x.%04x",
1608 PD_PRODUCT_PID(product), product & 0xffff);
1611 static void svdm_consume_identity_sop_prime(struct tcpm_port *port, const u32 *p, int cnt)
1613 u32 idh = p[VDO_INDEX_IDH];
1614 u32 product = p[VDO_INDEX_PRODUCT];
1618 * Attempt to consume identity only if cable currently is not set
1620 if (!IS_ERR_OR_NULL(port->cable))
1623 /* Reset cable identity */
1624 memset(&port->cable_ident, 0, sizeof(port->cable_ident));
1626 /* Fill out id header, cert, product, cable VDO 1 */
1627 port->cable_ident.id_header = idh;
1628 port->cable_ident.cert_stat = p[VDO_INDEX_CSTAT];
1629 port->cable_ident.product = product;
1630 port->cable_ident.vdo[0] = p[VDO_INDEX_CABLE_1];
1632 /* Fill out cable desc, infer svdm_version from pd revision */
1633 port->cable_desc.type = (enum typec_plug_type) (VDO_TYPEC_CABLE_TYPE(p[VDO_INDEX_CABLE_1]) +
1635 port->cable_desc.active = PD_IDH_PTYPE(idh) == IDH_PTYPE_ACABLE ? 1 : 0;
1636 /* Log PD Revision and additional cable VDO from negotiated revision */
1637 switch (port->negotiated_rev_prime) {
1639 port->cable_desc.pd_revision = 0x0300;
1640 if (port->cable_desc.active)
1641 port->cable_ident.vdo[1] = p[VDO_INDEX_CABLE_2];
1644 port->cable_desc.pd_revision = 0x0200;
1647 port->cable_desc.pd_revision = 0x0200;
1650 port->cable_desc.identity = &port->cable_ident;
1651 /* Register Cable, set identity and svdm_version */
1652 port->cable = typec_register_cable(port->typec_port, &port->cable_desc);
1653 if (IS_ERR_OR_NULL(port->cable))
1655 typec_cable_set_identity(port->cable);
1656 /* Get SVDM version */
1657 svdm_version = PD_VDO_SVDM_VER(p[VDO_INDEX_HDR]);
1658 typec_cable_set_svdm_version(port->cable, svdm_version);
1661 if (IS_ERR_OR_NULL(port->plug_prime)) {
1662 port->plug_prime_desc.index = TYPEC_PLUG_SOP_P;
1663 port->plug_prime = typec_register_plug(port->cable,
1664 &port->plug_prime_desc);
1668 static bool svdm_consume_svids(struct tcpm_port *port, const u32 *p, int cnt,
1669 enum tcpm_transmit_type rx_sop_type)
1671 struct pd_mode_data *pmdata = rx_sop_type == TCPC_TX_SOP_PRIME ?
1672 &port->mode_data_prime : &port->mode_data;
1675 for (i = 1; i < cnt; i++) {
1678 svid = (p[i] >> 16) & 0xffff;
1682 if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
1685 pmdata->svids[pmdata->nsvids++] = svid;
1686 tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
1688 svid = p[i] & 0xffff;
1692 if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
1695 pmdata->svids[pmdata->nsvids++] = svid;
1696 tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
1700 * PD3.0 Spec 6.4.4.3.2: The SVIDs are returned 2 per VDO (see Table
1701 * 6-43), and can be returned maximum 6 VDOs per response (see Figure
 * 6-19). If the Responder supports 12 or more SVIDs then the Discover
1703 * SVIDs Command Shall be executed multiple times until a Discover
1704 * SVIDs VDO is returned ending either with a SVID value of 0x0000 in
1705 * the last part of the last VDO or with a VDO containing two SVIDs
1706 * with values of 0x0000.
 * However, some odd docks report fewer than 12 SVIDs but without
1709 * 0x0000 in the last VDO, so we need to break the Discover SVIDs
1710 * request and return false here.
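 *
 * Example: a responder with exactly 12 SVIDs fills six VDOs in one response,
 * so a further Discover SVIDs round is needed and should come back starting
 * with a 0x0000 SVID to terminate the sequence.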
1714 tcpm_log(port, "SVID_DISCOVERY_MAX(%d) too low!", SVID_DISCOVERY_MAX);
1718 static void svdm_consume_modes(struct tcpm_port *port, const u32 *p, int cnt,
1719 enum tcpm_transmit_type rx_sop_type)
1721 struct pd_mode_data *pmdata = &port->mode_data;
1722 struct typec_altmode_desc *paltmode;
1725 switch (rx_sop_type) {
1726 case TCPC_TX_SOP_PRIME:
1727 pmdata = &port->mode_data_prime;
1728 if (pmdata->altmodes >= ARRAY_SIZE(port->plug_prime_altmode)) {
1729 /* Already logged in svdm_consume_svids() */
1734 pmdata = &port->mode_data;
1735 if (pmdata->altmodes >= ARRAY_SIZE(port->partner_altmode)) {
1736 /* Already logged in svdm_consume_svids() */
1744 for (i = 1; i < cnt; i++) {
1745 paltmode = &pmdata->altmode_desc[pmdata->altmodes];
1746 memset(paltmode, 0, sizeof(*paltmode));
1748 paltmode->svid = pmdata->svids[pmdata->svid_index];
1750 paltmode->vdo = p[i];
1752 tcpm_log(port, " Alternate mode %d: SVID 0x%04x, VDO %d: 0x%08x",
1753 pmdata->altmodes, paltmode->svid,
1754 paltmode->mode, paltmode->vdo);
1760 static void tcpm_register_partner_altmodes(struct tcpm_port *port)
1762 struct pd_mode_data *modep = &port->mode_data;
1763 struct typec_altmode *altmode;
1769 for (i = 0; i < modep->altmodes; i++) {
1770 altmode = typec_partner_register_altmode(port->partner,
1771 &modep->altmode_desc[i]);
1772 if (IS_ERR(altmode)) {
1773 tcpm_log(port, "Failed to register partner SVID 0x%04x",
1774 modep->altmode_desc[i].svid);
1777 port->partner_altmode[i] = altmode;
1781 static void tcpm_register_plug_altmodes(struct tcpm_port *port)
1783 struct pd_mode_data *modep = &port->mode_data_prime;
1784 struct typec_altmode *altmode;
1787 typec_plug_set_num_altmodes(port->plug_prime, modep->altmodes);
1789 for (i = 0; i < modep->altmodes; i++) {
1790 altmode = typec_plug_register_altmode(port->plug_prime,
1791 &modep->altmode_desc[i]);
1792 if (IS_ERR(altmode)) {
1793 tcpm_log(port, "Failed to register plug SVID 0x%04x",
1794 modep->altmode_desc[i].svid);
1797 port->plug_prime_altmode[i] = altmode;
1801 #define supports_modal(port) PD_IDH_MODAL_SUPP((port)->partner_ident.id_header)
1802 #define supports_modal_cable(port) PD_IDH_MODAL_SUPP((port)->cable_ident.id_header)
#define supports_host(port) PD_IDH_HOST_SUPP((port)->partner_ident.id_header)
1806 * Helper to determine whether the port is capable of SOP' communication at the
1807 * current point in time.
1809 static bool tcpm_can_communicate_sop_prime(struct tcpm_port *port)
1811 /* Check to see if tcpc supports SOP' communication */
1812 if (!port->tcpc->cable_comm_capable || !port->tcpc->cable_comm_capable(port->tcpc))
1815 * Power Delivery 2.0 Section 6.3.11
1816 * Before communicating with a Cable Plug a Port Should ensure that it
1817 * is the Vconn Source and that the Cable Plugs are powered by
1818 * performing a Vconn swap if necessary. Since it cannot be guaranteed
1819 * that the present Vconn Source is supplying Vconn, the only means to
1820 * ensure that the Cable Plugs are powered is for a Port wishing to
1821 * communicate with a Cable Plug is to become the Vconn Source.
1823 * Power Delivery 3.0 Section 6.3.11
1824 * Before communicating with a Cable Plug a Port Shall ensure that it
1825 * is the Vconn source.
1827 if (port->vconn_role != TYPEC_SOURCE)
1830 * Power Delivery 2.0 Section 2.4.4
1831 * When no Contract or an Implicit Contract is in place the Source can
1832 * communicate with a Cable Plug using SOP' packets in order to discover
1833 * its characteristics.
1835 * Power Delivery 3.0 Section 2.4.4
1836 * When no Contract or an Implicit Contract is in place only the Source
1837 * port that is supplying Vconn is allowed to send packets to a Cable
1838 * Plug and is allowed to respond to packets from the Cable Plug.
1840 if (!port->explicit_contract)
1841 return port->pwr_role == TYPEC_SOURCE;
1842 if (port->negotiated_rev == PD_REV30)
1845 * Power Delivery 2.0 Section 2.4.4
1847 * When an Explicit Contract is in place the DFP (either the Source or
1848 * the Sink) can communicate with the Cable Plug(s) using SOP’/SOP”
1849 * Packets (see Figure 2-3).
1851 if (port->negotiated_rev == PD_REV20)
1852 return port->data_role == TYPEC_HOST;
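/*
 * In short: SOP' communication first of all requires being the Vconn source;
 * with no explicit contract only a source may talk to the cable plug, under
 * PD3.0 the Vconn source may, and under PD2.0 only the DFP (data host) may.
 */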
1856 static bool tcpm_attempt_vconn_swap_discovery(struct tcpm_port *port)
1858 if (!port->tcpc->attempt_vconn_swap_discovery)
1861 /* Port is already source, no need to perform swap */
1862 if (port->vconn_role == TYPEC_SOURCE)
1866 * Partner needs to support Alternate Modes with modal support. If
1867 * partner is also capable of being a USB Host, it could be a device
1868 * that supports Alternate Modes as the DFP.
1870 if (!supports_modal(port) || supports_host(port))
1873 if ((port->negotiated_rev == PD_REV20 && port->data_role == TYPEC_HOST) ||
1874 port->negotiated_rev == PD_REV30)
1875 return port->tcpc->attempt_vconn_swap_discovery(port->tcpc);
1881 static bool tcpm_cable_vdm_supported(struct tcpm_port *port)
1883 return !IS_ERR_OR_NULL(port->cable) &&
1884 typec_cable_is_active(port->cable) &&
1885 supports_modal_cable(port) &&
1886 tcpm_can_communicate_sop_prime(port);
1889 static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
1890 const u32 *p, int cnt, u32 *response,
1891 enum adev_actions *adev_action,
1892 enum tcpm_transmit_type rx_sop_type,
1893 enum tcpm_transmit_type *response_tx_sop_type)
1895 struct typec_port *typec = port->typec_port;
1896 struct typec_altmode *pdev, *pdev_prime;
1897 struct pd_mode_data *modep, *modep_prime;
1905 cmd_type = PD_VDO_CMDT(p[0]);
1906 cmd = PD_VDO_CMD(p[0]);
1908 tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d",
1909 p[0], cmd_type, cmd, cnt);
1911 switch (rx_sop_type) {
1912 case TCPC_TX_SOP_PRIME:
1913 modep_prime = &port->mode_data_prime;
1914 pdev_prime = typec_match_altmode(port->plug_prime_altmode,
1915 ALTMODE_DISCOVERY_MAX,
1918 svdm_version = typec_get_cable_svdm_version(typec);
1920 * Update SVDM version if cable was discovered before port partner.
1922 if (!IS_ERR_OR_NULL(port->cable) &&
1923 PD_VDO_SVDM_VER(p[0]) < svdm_version)
1924 typec_cable_set_svdm_version(port->cable, svdm_version);
1927 modep = &port->mode_data;
1928 pdev = typec_match_altmode(port->partner_altmode,
1929 ALTMODE_DISCOVERY_MAX,
1932 svdm_version = typec_get_negotiated_svdm_version(typec);
1933 if (svdm_version < 0)
1937 modep = &port->mode_data;
1938 pdev = typec_match_altmode(port->partner_altmode,
1939 ALTMODE_DISCOVERY_MAX,
1942 svdm_version = typec_get_negotiated_svdm_version(typec);
1943 if (svdm_version < 0)
	 * Only the port or port partner is allowed to initiate SVDM
	 * commands over SOP'. In case the port partner initiates a
1953 * sequence when it is not allowed to send SOP' messages, drop
1954 * the message should the TCPM port try to process it.
1956 if (rx_sop_type == TCPC_TX_SOP_PRIME)
1960 case CMD_DISCOVER_IDENT:
1961 if (PD_VDO_VID(p[0]) != USB_SID_PD)
1964 if (IS_ERR_OR_NULL(port->partner))
1967 if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
1968 typec_partner_set_svdm_version(port->partner,
1969 PD_VDO_SVDM_VER(p[0]));
1970 svdm_version = PD_VDO_SVDM_VER(p[0]);
1973 port->ams = DISCOVER_IDENTITY;
1975 * PD2.0 Spec 6.10.3: respond with NAK as DFP (data host)
1976 * PD3.1 Spec 6.4.4.2.5.1: respond with NAK if "invalid field" or
1977 * "wrong configuation" or "Unrecognized"
1979 if ((port->data_role == TYPEC_DEVICE || svdm_version >= SVDM_VER_2_0) &&
1981 if (svdm_version < SVDM_VER_2_0) {
1982 for (i = 0; i < port->nr_snk_vdo_v1; i++)
1983 response[i + 1] = port->snk_vdo_v1[i];
1984 rlen = port->nr_snk_vdo_v1 + 1;
1987 for (i = 0; i < port->nr_snk_vdo; i++)
1988 response[i + 1] = port->snk_vdo[i];
1989 rlen = port->nr_snk_vdo + 1;
1993 case CMD_DISCOVER_SVID:
1994 port->ams = DISCOVER_SVIDS;
1996 case CMD_DISCOVER_MODES:
1997 port->ams = DISCOVER_MODES;
1999 case CMD_ENTER_MODE:
2000 port->ams = DFP_TO_UFP_ENTER_MODE;
2003 port->ams = DFP_TO_UFP_EXIT_MODE;
2006 /* Attention command does not have response */
2007 *adev_action = ADEV_ATTENTION;
2013 response[0] = p[0] | VDO_CMDT(CMDT_RSP_ACK);
2014 } else if (rlen == 0) {
2015 response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
2018 response[0] = p[0] | VDO_CMDT(CMDT_RSP_BUSY);
2021 response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
2022 (VDO_SVDM_VERS(typec_get_negotiated_svdm_version(typec)));
2026 * Silently drop message if we are not connected, but can process
2027 * if SOP' Discover Identity prior to explicit contract.
2029 if (IS_ERR_OR_NULL(port->partner) &&
2030 !(rx_sop_type == TCPC_TX_SOP_PRIME && cmd == CMD_DISCOVER_IDENT))
2033 tcpm_ams_finish(port);
2037 * SVDM Command Flow for SOP and SOP':
2038 * SOP Discover Identity
2039 * SOP' Discover Identity
2040 * SOP Discover SVIDs
2043 * SOP' Discover SVIDs
2046 * Perform Discover SOP' if the port can communicate with cable
2049 case CMD_DISCOVER_IDENT:
2050 switch (rx_sop_type) {
2052 if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
2053 typec_partner_set_svdm_version(port->partner,
2054 PD_VDO_SVDM_VER(p[0]));
2055 /* If cable is discovered before partner, downgrade svdm */
2056 if (!IS_ERR_OR_NULL(port->cable) &&
2057 (typec_get_cable_svdm_version(port->typec_port) >
2059 typec_cable_set_svdm_version(port->cable,
2063 svdm_consume_identity(port, p, cnt);
2064 /* Attempt Vconn swap, delay SOP' discovery if necessary */
2065 if (tcpm_attempt_vconn_swap_discovery(port)) {
2066 port->send_discover_prime = true;
2067 port->upcoming_state = VCONN_SWAP_SEND;
2068 ret = tcpm_ams_start(port, VCONN_SWAP);
2071 /* Cannot perform Vconn swap */
2072 port->upcoming_state = INVALID_STATE;
2073 port->send_discover_prime = false;
2077 * Attempt Discover Identity on SOP' if the
2078 * cable was not discovered previously, and use
2079 * the SVDM version of the partner to probe.
2081 if (IS_ERR_OR_NULL(port->cable) &&
2082 tcpm_can_communicate_sop_prime(port)) {
2083 *response_tx_sop_type = TCPC_TX_SOP_PRIME;
2084 port->send_discover_prime = true;
2085 response[0] = VDO(USB_SID_PD, 1,
2086 typec_get_negotiated_svdm_version(typec),
2087 CMD_DISCOVER_IDENT);
2090 *response_tx_sop_type = TCPC_TX_SOP;
2091 response[0] = VDO(USB_SID_PD, 1,
2092 typec_get_negotiated_svdm_version(typec),
2097 case TCPC_TX_SOP_PRIME:
2099 * svdm_consume_identity_sop_prime will determine
2100 * the svdm_version for the cable moving forward.
2102 svdm_consume_identity_sop_prime(port, p, cnt);
2105 * If received in SRC_VDM_IDENTITY_REQUEST, continue
2106 * to SRC_SEND_CAPABILITIES
2108 if (port->state == SRC_VDM_IDENTITY_REQUEST) {
2109 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
2113 *response_tx_sop_type = TCPC_TX_SOP;
2114 response[0] = VDO(USB_SID_PD, 1,
2115 typec_get_negotiated_svdm_version(typec),
2123 case CMD_DISCOVER_SVID:
2124 *response_tx_sop_type = rx_sop_type;
2126 if (svdm_consume_svids(port, p, cnt, rx_sop_type)) {
2127 response[0] = VDO(USB_SID_PD, 1, svdm_version, CMD_DISCOVER_SVID);
2130 if (rx_sop_type == TCPC_TX_SOP) {
2131 if (modep->nsvids && supports_modal(port)) {
2132 response[0] = VDO(modep->svids[0], 1, svdm_version,
2133 CMD_DISCOVER_MODES);
2136 } else if (rx_sop_type == TCPC_TX_SOP_PRIME) {
2137 if (modep_prime->nsvids) {
2138 response[0] = VDO(modep_prime->svids[0], 1,
2139 svdm_version, CMD_DISCOVER_MODES);
2145 case CMD_DISCOVER_MODES:
2146 if (rx_sop_type == TCPC_TX_SOP) {
2148 svdm_consume_modes(port, p, cnt, rx_sop_type);
2149 modep->svid_index++;
2150 if (modep->svid_index < modep->nsvids) {
2151 u16 svid = modep->svids[modep->svid_index];
2152 *response_tx_sop_type = TCPC_TX_SOP;
2153 response[0] = VDO(svid, 1, svdm_version,
2154 CMD_DISCOVER_MODES);
2156 } else if (tcpm_cable_vdm_supported(port)) {
2157 *response_tx_sop_type = TCPC_TX_SOP_PRIME;
2158 response[0] = VDO(USB_SID_PD, 1,
2159 typec_get_cable_svdm_version(typec),
2163 tcpm_register_partner_altmodes(port);
2165 } else if (rx_sop_type == TCPC_TX_SOP_PRIME) {
2167 svdm_consume_modes(port, p, cnt, rx_sop_type);
2168 modep_prime->svid_index++;
2169 if (modep_prime->svid_index < modep_prime->nsvids) {
2170 u16 svid = modep_prime->svids[modep_prime->svid_index];
2171 *response_tx_sop_type = TCPC_TX_SOP_PRIME;
2172 response[0] = VDO(svid, 1,
2173 typec_get_cable_svdm_version(typec),
2174 CMD_DISCOVER_MODES);
2177 tcpm_register_plug_altmodes(port);
2178 tcpm_register_partner_altmodes(port);
2182 case CMD_ENTER_MODE:
2183 *response_tx_sop_type = rx_sop_type;
2184 if (rx_sop_type == TCPC_TX_SOP) {
2186 typec_altmode_update_active(pdev, true);
2187 *adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL;
2189 } else if (rx_sop_type == TCPC_TX_SOP_PRIME) {
2190 if (adev && pdev_prime) {
2191 typec_altmode_update_active(pdev_prime, true);
2192 *adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL;
2197 *response_tx_sop_type = rx_sop_type;
2198 if (rx_sop_type == TCPC_TX_SOP) {
2200 typec_altmode_update_active(pdev, false);
2201 /* Back to USB Operation */
2202 *adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
2207 case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
2210 /* Unrecognized SVDM */
2211 response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
2213 response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
2214 (VDO_SVDM_VERS(svdm_version));
2219 tcpm_ams_finish(port);
2221 case CMD_DISCOVER_IDENT:
2222 case CMD_DISCOVER_SVID:
2223 case CMD_DISCOVER_MODES:
2224 case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
2226 case CMD_ENTER_MODE:
2227 /* Back to USB Operation */
2228 *adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
2231 /* Unrecognized SVDM */
2232 response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
2234 response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
2235 (VDO_SVDM_VERS(svdm_version));
2240 response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
2242 response[0] = (response[0] & ~VDO_SVDM_VERS_MASK) |
2243 (VDO_SVDM_VERS(svdm_version));
2247 /* Informing the alternate mode drivers about everything */
2248 *adev_action = ADEV_QUEUE_VDM;
2252 static void tcpm_pd_handle_msg(struct tcpm_port *port,
			       enum pd_msg_request message, enum tcpm_ams ams);
2256 static void tcpm_handle_vdm_request(struct tcpm_port *port,
2257 const __le32 *payload, int cnt,
2258 enum tcpm_transmit_type rx_sop_type)
2260 enum adev_actions adev_action = ADEV_NONE;
2261 struct typec_altmode *adev;
2262 u32 p[PD_MAX_PAYLOAD];
2263 u32 response[8] = { };
2265 enum tcpm_transmit_type response_tx_sop_type = TCPC_TX_SOP;
2267 for (i = 0; i < cnt; i++)
2268 p[i] = le32_to_cpu(payload[i]);
2270 adev = typec_match_altmode(port->port_altmode, ALTMODE_DISCOVERY_MAX,
2271 PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0]));
2273 if (port->vdm_state == VDM_STATE_BUSY) {
2274 /* If UFP responded busy retry after timeout */
2275 if (PD_VDO_CMDT(p[0]) == CMDT_RSP_BUSY) {
2276 port->vdm_state = VDM_STATE_WAIT_RSP_BUSY;
2277 port->vdo_retry = (p[0] & ~VDO_CMDT_MASK) |
2279 mod_vdm_delayed_work(port, PD_T_VDM_BUSY);
2282 port->vdm_state = VDM_STATE_DONE;
2285 if (PD_VDO_SVDM(p[0]) && (adev || tcpm_vdm_ams(port) || port->nr_snk_vdo)) {
2287 * Here a SVDM is received (INIT or RSP or unknown). Set the vdm_sm_running in
2288 * advance because we are dropping the lock but may send VDMs soon.
2289 * For the cases of INIT received:
2290 * - If no response to send, it will be cleared later in this function.
2291 * - If there are responses to send, it will be cleared in the state machine.
2292 * For the cases of RSP received:
2293 * - If no further INIT to send, it will be cleared later in this function.
2294 * - Otherwise, it will be cleared in the state machine if timeout or it will go
2295 * back here until no further INIT to send.
2296 * For the cases of unknown type received:
2297 * - We will send NAK and the flag will be cleared in the state machine.
2299 port->vdm_sm_running = true;
2300 rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action,
2301 rx_sop_type, &response_tx_sop_type);
2303 if (port->negotiated_rev >= PD_REV30)
2304 tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
2308 * We are done with any state stored in the port struct now, except
2309 * for any port struct changes done by the tcpm_queue_vdm() call
2310 * below, which is a separate operation.
2312 * So we can safely release the lock here; and we MUST release the
2313 * lock here to avoid an AB BA lock inversion:
2315 * If we keep the lock here then the lock ordering in this path is:
2316 * 1. tcpm_pd_rx_handler take the tcpm port lock
2317 * 2. One of the typec_altmode_* calls below takes the alt-mode's lock
2319 * And we also have this ordering:
2320 * 1. alt-mode driver takes the alt-mode's lock
2321 * 2. alt-mode driver calls tcpm_altmode_enter which takes the
2324 * Dropping our lock here avoids this.
2326 mutex_unlock(&port->lock);
2329 switch (adev_action) {
2332 case ADEV_NOTIFY_USB_AND_QUEUE_VDM:
2333 WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB, NULL));
2334 typec_altmode_vdm(adev, p[0], &p[1], cnt);
2336 case ADEV_QUEUE_VDM:
2337 if (response_tx_sop_type == TCPC_TX_SOP_PRIME)
2338 typec_cable_altmode_vdm(adev, TYPEC_PLUG_SOP_P, p[0], &p[1], cnt);
2340 typec_altmode_vdm(adev, p[0], &p[1], cnt);
2342 case ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL:
2343 if (response_tx_sop_type == TCPC_TX_SOP_PRIME) {
2344 if (typec_cable_altmode_vdm(adev, TYPEC_PLUG_SOP_P,
2345 p[0], &p[1], cnt)) {
2346 int svdm_version = typec_get_cable_svdm_version(
2348 if (svdm_version < 0)
2351 response[0] = VDO(adev->svid, 1, svdm_version,
2353 response[0] |= VDO_OPOS(adev->mode);
2357 if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
2358 int svdm_version = typec_get_negotiated_svdm_version(
2360 if (svdm_version < 0)
2363 response[0] = VDO(adev->svid, 1, svdm_version,
2365 response[0] |= VDO_OPOS(adev->mode);
2370 case ADEV_ATTENTION:
2371 if (typec_altmode_attention(adev, p[1]))
2372 tcpm_log(port, "typec_altmode_attention no port partner altmode");
2378 * We must re-take the lock here to balance the unlock in
2379 * tcpm_pd_rx_handler, note that no changes, other than the
2380 * tcpm_queue_vdm call, are made while the lock is held again.
2381 * All that is done after the call is unwinding the call stack until
2382 * we return to tcpm_pd_rx_handler and do the unlock there.
2384 mutex_lock(&port->lock);
2387 tcpm_queue_vdm(port, response[0], &response[1], rlen - 1, response_tx_sop_type);
2389 port->vdm_sm_running = false;
2392 static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
2393 const u32 *data, int count, enum tcpm_transmit_type tx_sop_type)
2398 switch (tx_sop_type) {
2399 case TCPC_TX_SOP_PRIME:
2401 * If the cable has been discovered, its negotiated SVDM Version is
2402 * used; otherwise fall back to the maximum supported version.
2404 svdm_version = typec_get_cable_svdm_version(port->typec_port);
2405 if (svdm_version < 0)
2406 svdm_version = SVDM_VER_MAX;
2409 svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
2410 if (svdm_version < 0)
2414 svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
2415 if (svdm_version < 0)
2420 if (WARN_ON(count > VDO_MAX_SIZE - 1))
2421 count = VDO_MAX_SIZE - 1;
2423 /* set VDM header with VID & CMD */
2424 header = VDO(vid, ((vid & USB_SID_PD) == USB_SID_PD) ?
2425 1 : (PD_VDO_CMD(cmd) <= CMD_ATTENTION),
2427 tcpm_queue_vdm(port, header, data, count, tx_sop_type);
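/*
 * Editor's reading of the ternary above (not in the original source): the
 * header's structured-VDM bit is set when the SVID matches the USB_SID_PD
 * mask or the command is one of the standard structured commands
 * (<= CMD_ATTENTION); anything else is queued as an unstructured VDM. For
 * example, a hypothetical tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT,
 * NULL, 0, TCPC_TX_SOP) builds a structured Discover Identity header.
 */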
2430 static unsigned int vdm_ready_timeout(u32 vdm_hdr)
2432 unsigned int timeout;
2433 int cmd = PD_VDO_CMD(vdm_hdr);
2435 /* it's not a structured VDM command */
2436 if (!PD_VDO_SVDM(vdm_hdr))
2437 return PD_T_VDM_UNSTRUCTURED;
2439 switch (PD_VDO_CMDT(vdm_hdr)) {
2441 if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
2442 timeout = PD_T_VDM_WAIT_MODE_E;
2444 timeout = PD_T_VDM_SNDR_RSP;
2447 if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
2448 timeout = PD_T_VDM_E_MODE;
2450 timeout = PD_T_VDM_RCVR_RSP;
2456 static void vdm_run_state_machine(struct tcpm_port *port)
2458 struct pd_message msg;
2460 u32 vdo_hdr = port->vdo_data[0];
2461 u32 response[8] = { };
2463 switch (port->vdm_state) {
2464 case VDM_STATE_READY:
2465 /* Only transmit VDM if attached */
2466 if (!port->attached) {
2467 port->vdm_state = VDM_STATE_ERR_BUSY;
2472 * if there's traffic or we're not in a PDO-ready state, don't send a VDM
2475 if (port->state != SRC_READY && port->state != SNK_READY &&
2476 port->state != SRC_VDM_IDENTITY_REQUEST) {
2477 port->vdm_sm_running = false;
2481 /* TODO: AMS operation for Unstructured VDM */
2482 if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) {
2483 switch (PD_VDO_CMD(vdo_hdr)) {
2484 case CMD_DISCOVER_IDENT:
2485 res = tcpm_ams_start(port, DISCOVER_IDENTITY);
2487 switch (port->tx_sop_type) {
2488 case TCPC_TX_SOP_PRIME:
2489 port->send_discover_prime = false;
2492 port->send_discover = false;
2495 port->send_discover = false;
2498 } else if (res == -EAGAIN) {
2499 port->vdo_data[0] = 0;
2500 mod_send_discover_delayed_work(port,
2501 SEND_DISCOVER_RETRY_MS);
2504 case CMD_DISCOVER_SVID:
2505 res = tcpm_ams_start(port, DISCOVER_SVIDS);
2507 case CMD_DISCOVER_MODES:
2508 res = tcpm_ams_start(port, DISCOVER_MODES);
2510 case CMD_ENTER_MODE:
2511 res = tcpm_ams_start(port, DFP_TO_UFP_ENTER_MODE);
2514 res = tcpm_ams_start(port, DFP_TO_UFP_EXIT_MODE);
2517 res = tcpm_ams_start(port, ATTENTION);
2519 case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
2520 res = tcpm_ams_start(port, STRUCTURED_VDMS);
2528 port->vdm_state = VDM_STATE_ERR_BUSY;
2533 port->vdm_state = VDM_STATE_SEND_MESSAGE;
2534 mod_vdm_delayed_work(port, (port->negotiated_rev >= PD_REV30 &&
2535 port->pwr_role == TYPEC_SOURCE &&
2536 PD_VDO_SVDM(vdo_hdr) &&
2537 PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) ?
2540 case VDM_STATE_WAIT_RSP_BUSY:
2541 port->vdo_data[0] = port->vdo_retry;
2542 port->vdo_count = 1;
2543 port->vdm_state = VDM_STATE_READY;
2544 tcpm_ams_finish(port);
2546 case VDM_STATE_BUSY:
2547 port->vdm_state = VDM_STATE_ERR_TMOUT;
2548 if (port->ams != NONE_AMS)
2549 tcpm_ams_finish(port);
2551 case VDM_STATE_ERR_SEND:
2553 * When sending Discover Identity to SOP' before establishing an
2554 * explicit contract, do not retry. Instead, weave sending
2555 * Source_Capabilities over SOP and Discover Identity over SOP'.
2557 if (port->state == SRC_VDM_IDENTITY_REQUEST) {
2558 tcpm_ams_finish(port);
2559 port->vdm_state = VDM_STATE_DONE;
2560 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
2562 * A partner which does not support USB PD will not reply,
2563 * so this is not a fatal error. At the same time, some
2564 * devices may not return GoodCRC under some circumstances,
2565 * so we need to retry.
2567 } else if (port->vdm_retries < 3) {
2568 tcpm_log(port, "VDM Tx error, retry");
2569 port->vdm_retries++;
2570 port->vdm_state = VDM_STATE_READY;
2571 if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT)
2572 tcpm_ams_finish(port);
2574 tcpm_ams_finish(port);
2575 if (port->tx_sop_type == TCPC_TX_SOP)
2577 /* Handle SOP' Transmission Errors */
2578 switch (PD_VDO_CMD(vdo_hdr)) {
2580 * If Discover Identity fails on SOP', then resume
2581 * discovery process on SOP only.
2583 case CMD_DISCOVER_IDENT:
2584 port->vdo_data[0] = 0;
2585 response[0] = VDO(USB_SID_PD, 1,
2586 typec_get_negotiated_svdm_version(
2589 tcpm_queue_vdm(port, response[0], &response[1],
2593 * If Discover SVIDs or Discover Modes fail, then
2594 * proceed with Alt Mode discovery process on SOP.
2596 case CMD_DISCOVER_SVID:
2597 tcpm_register_partner_altmodes(port);
2599 case CMD_DISCOVER_MODES:
2600 tcpm_register_partner_altmodes(port);
2607 case VDM_STATE_SEND_MESSAGE:
2608 /* Prepare and send VDM */
2609 memset(&msg, 0, sizeof(msg));
2610 if (port->tx_sop_type == TCPC_TX_SOP_PRIME) {
2611 msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
2612 0, /* Cable Plug Indicator for DFP/UFP */
2614 port->negotiated_rev_prime,
2615 port->message_id_prime,
2618 msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
2621 port->negotiated_rev,
2625 for (i = 0; i < port->vdo_count; i++)
2626 msg.payload[i] = cpu_to_le32(port->vdo_data[i]);
2627 res = tcpm_pd_transmit(port, port->tx_sop_type, &msg);
2629 port->vdm_state = VDM_STATE_ERR_SEND;
2631 unsigned long timeout;
2633 port->vdm_retries = 0;
2634 port->vdo_data[0] = 0;
2635 port->vdm_state = VDM_STATE_BUSY;
2636 timeout = vdm_ready_timeout(vdo_hdr);
2637 mod_vdm_delayed_work(port, timeout);
2645 static void vdm_state_machine_work(struct kthread_work *work)
2647 struct tcpm_port *port = container_of(work, struct tcpm_port, vdm_state_machine);
2648 enum vdm_states prev_state;
2650 mutex_lock(&port->lock);
2653 * Continue running as long as the port is not busy and there was a state change.
2657 prev_state = port->vdm_state;
2658 vdm_run_state_machine(port);
2659 } while (port->vdm_state != prev_state &&
2660 port->vdm_state != VDM_STATE_BUSY &&
2661 port->vdm_state != VDM_STATE_SEND_MESSAGE);
2663 if (port->vdm_state < VDM_STATE_READY)
2664 port->vdm_sm_running = false;
2666 mutex_unlock(&port->lock);
2672 PDO_ERR_VSAFE5V_NOT_FIRST,
2673 PDO_ERR_PDO_TYPE_NOT_IN_ORDER,
2674 PDO_ERR_FIXED_NOT_SORTED,
2675 PDO_ERR_VARIABLE_BATT_NOT_SORTED,
2677 PDO_ERR_PPS_APDO_NOT_SORTED,
2678 PDO_ERR_DUPE_PPS_APDO,
2681 static const char * const pdo_err_msg[] = {
2682 [PDO_ERR_NO_VSAFE5V] =
2683 " err: source/sink caps should at least have vSafe5V",
2684 [PDO_ERR_VSAFE5V_NOT_FIRST] =
2685 " err: vSafe5V Fixed Supply Object Shall always be the first object",
2686 [PDO_ERR_PDO_TYPE_NOT_IN_ORDER] =
2687 " err: PDOs should be in the following order: Fixed; Battery; Variable",
2688 [PDO_ERR_FIXED_NOT_SORTED] =
2689 " err: Fixed supply pdos should be in increasing order of their fixed voltage",
2690 [PDO_ERR_VARIABLE_BATT_NOT_SORTED] =
2691 " err: Variable/Battery supply pdos should be in increasing order of their minimum voltage",
2692 [PDO_ERR_DUPE_PDO] =
2693 " err: Variable/Batt supply pdos cannot have same min/max voltage",
2694 [PDO_ERR_PPS_APDO_NOT_SORTED] =
2695 " err: Programmable power supply apdos should be in increasing order of their maximum voltage",
2696 [PDO_ERR_DUPE_PPS_APDO] =
2697 " err: Programmable power supply apdos cannot have same min/max voltage and max current",
2700 static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo,
2701 unsigned int nr_pdo)
2705 /* Should at least contain vSafe5v */
2707 return PDO_ERR_NO_VSAFE5V;
2709 /* The vSafe5V Fixed Supply Object Shall always be the first object */
2710 if (pdo_type(pdo[0]) != PDO_TYPE_FIXED ||
2711 pdo_fixed_voltage(pdo[0]) != VSAFE5V)
2712 return PDO_ERR_VSAFE5V_NOT_FIRST;
2714 for (i = 1; i < nr_pdo; i++) {
2715 if (pdo_type(pdo[i]) < pdo_type(pdo[i - 1])) {
2716 return PDO_ERR_PDO_TYPE_NOT_IN_ORDER;
2717 } else if (pdo_type(pdo[i]) == pdo_type(pdo[i - 1])) {
2718 enum pd_pdo_type type = pdo_type(pdo[i]);
2722 * The remaining Fixed Supply Objects, if
2723 * present, shall be sent in voltage order;
2724 * lowest to highest.
2726 case PDO_TYPE_FIXED:
2727 if (pdo_fixed_voltage(pdo[i]) <=
2728 pdo_fixed_voltage(pdo[i - 1]))
2729 return PDO_ERR_FIXED_NOT_SORTED;
2732 * The Battery Supply Objects and Variable
2733 * supply, if present shall be sent in Minimum
2734 * Voltage order; lowest to highest.
2738 if (pdo_min_voltage(pdo[i]) <
2739 pdo_min_voltage(pdo[i - 1]))
2740 return PDO_ERR_VARIABLE_BATT_NOT_SORTED;
2741 else if ((pdo_min_voltage(pdo[i]) ==
2742 pdo_min_voltage(pdo[i - 1])) &&
2743 (pdo_max_voltage(pdo[i]) ==
2744 pdo_max_voltage(pdo[i - 1])))
2745 return PDO_ERR_DUPE_PDO;
2748 * The Programmable Power Supply APDOs, if present,
2749 * shall be sent in Maximum Voltage order;
2750 * lowest to highest.
2753 if (pdo_apdo_type(pdo[i]) != APDO_TYPE_PPS)
2756 if (pdo_pps_apdo_max_voltage(pdo[i]) <
2757 pdo_pps_apdo_max_voltage(pdo[i - 1]))
2758 return PDO_ERR_PPS_APDO_NOT_SORTED;
2759 else if (pdo_pps_apdo_min_voltage(pdo[i]) ==
2760 pdo_pps_apdo_min_voltage(pdo[i - 1]) &&
2761 pdo_pps_apdo_max_voltage(pdo[i]) ==
2762 pdo_pps_apdo_max_voltage(pdo[i - 1]) &&
2763 pdo_pps_apdo_max_current(pdo[i]) ==
2764 pdo_pps_apdo_max_current(pdo[i - 1]))
2765 return PDO_ERR_DUPE_PPS_APDO;
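/*
 * Worked example (editor's illustration, not in the original source): a
 * capability list of
 *   Fixed 5V, Fixed 9V, Variable 5-12V, PPS APDO 3.3-11V
 * satisfies the checks above, while swapping the two Fixed entries would trip
 * PDO_ERR_FIXED_NOT_SORTED, and listing the Variable entry before the Fixed
 * ones would trip PDO_ERR_PDO_TYPE_NOT_IN_ORDER.
 */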
2768 tcpm_log_force(port, " Unknown pdo type");
2776 static int tcpm_validate_caps(struct tcpm_port *port, const u32 *pdo,
2777 unsigned int nr_pdo)
2779 enum pdo_err err_index = tcpm_caps_err(port, pdo, nr_pdo);
2781 if (err_index != PDO_NO_ERR) {
2782 tcpm_log_force(port, " %s", pdo_err_msg[err_index]);
2789 static int tcpm_altmode_enter(struct typec_altmode *altmode, u32 *vdo)
2791 struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2795 svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
2796 if (svdm_version < 0)
2797 return svdm_version;
2799 header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE);
2800 header |= VDO_OPOS(altmode->mode);
2802 tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP);
2806 static int tcpm_altmode_exit(struct typec_altmode *altmode)
2808 struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2812 svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
2813 if (svdm_version < 0)
2814 return svdm_version;
2816 header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE);
2817 header |= VDO_OPOS(altmode->mode);
2819 tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP);
2823 static int tcpm_altmode_vdm(struct typec_altmode *altmode,
2824 u32 header, const u32 *data, int count)
2826 struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2828 tcpm_queue_vdm_unlocked(port, header, data, count - 1, TCPC_TX_SOP);
2833 static const struct typec_altmode_ops tcpm_altmode_ops = {
2834 .enter = tcpm_altmode_enter,
2835 .exit = tcpm_altmode_exit,
2836 .vdm = tcpm_altmode_vdm,
2840 static int tcpm_cable_altmode_enter(struct typec_altmode *altmode, enum typec_plug_index sop,
2843 struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2847 svdm_version = typec_get_cable_svdm_version(port->typec_port);
2848 if (svdm_version < 0)
2849 return svdm_version;
2851 header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE);
2852 header |= VDO_OPOS(altmode->mode);
2854 tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP_PRIME);
2858 static int tcpm_cable_altmode_exit(struct typec_altmode *altmode, enum typec_plug_index sop)
2860 struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2864 svdm_version = typec_get_cable_svdm_version(port->typec_port);
2865 if (svdm_version < 0)
2866 return svdm_version;
2868 header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE);
2869 header |= VDO_OPOS(altmode->mode);
2871 tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP_PRIME);
2875 static int tcpm_cable_altmode_vdm(struct typec_altmode *altmode, enum typec_plug_index sop,
2876 u32 header, const u32 *data, int count)
2878 struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
2880 tcpm_queue_vdm_unlocked(port, header, data, count - 1, TCPC_TX_SOP_PRIME);
2885 static const struct typec_cable_ops tcpm_cable_ops = {
2886 .enter = tcpm_cable_altmode_enter,
2887 .exit = tcpm_cable_altmode_exit,
2888 .vdm = tcpm_cable_altmode_vdm,
2892 * PD (data, control) command handling functions
2894 static inline enum tcpm_state ready_state(struct tcpm_port *port)
2896 if (port->pwr_role == TYPEC_SOURCE)
2902 static int tcpm_pd_send_control(struct tcpm_port *port,
2903 enum pd_ctrl_msg_type type,
2904 enum tcpm_transmit_type tx_sop_type);
2906 static void tcpm_handle_alert(struct tcpm_port *port, const __le32 *payload,
2909 u32 p0 = le32_to_cpu(payload[0]);
2910 unsigned int type = usb_pd_ado_type(p0);
2913 tcpm_log(port, "Alert message received with no type");
2914 tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
2918 /* Just handling non-battery alerts for now */
2919 if (!(type & USB_PD_ADO_TYPE_BATT_STATUS_CHANGE)) {
2920 if (port->pwr_role == TYPEC_SOURCE) {
2921 port->upcoming_state = GET_STATUS_SEND;
2922 tcpm_ams_start(port, GETTING_SOURCE_SINK_STATUS);
2925 * Do not check SinkTxOk here in case the Source doesn't set its Rp to SinkTxOk in time.
2928 port->ams = GETTING_SOURCE_SINK_STATUS;
2929 tcpm_set_state(port, GET_STATUS_SEND, 0);
2932 tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
2936 static int tcpm_set_auto_vbus_discharge_threshold(struct tcpm_port *port,
2937 enum typec_pwr_opmode mode, bool pps_active,
2938 u32 requested_vbus_voltage)
2942 if (!port->tcpc->set_auto_vbus_discharge_threshold)
2945 ret = port->tcpc->set_auto_vbus_discharge_threshold(port->tcpc, mode, pps_active,
2946 requested_vbus_voltage);
2947 tcpm_log_force(port,
2948 "set_auto_vbus_discharge_threshold mode:%d pps_active:%c vbus:%u ret:%d",
2949 mode, pps_active ? 'y' : 'n', requested_vbus_voltage, ret);
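/*
 * Usage note (editor's addition): the callers below relax the threshold with
 * (TYPEC_PWR_MODE_USB, false, 0) right before sending a Request, because the
 * bus voltage is about to change, and restore it with TYPEC_PWR_MODE_PD plus
 * the active supply voltage once the Request has been accepted or rejected.
 */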
2954 static void tcpm_pd_handle_state(struct tcpm_port *port,
2955 enum tcpm_state state,
2957 unsigned int delay_ms)
2959 switch (port->state) {
2963 tcpm_set_state(port, state, delay_ms);
2965 /* 8.3.3.4.1.1 and 6.8.1 power transitioning */
2966 case SNK_TRANSITION_SINK:
2967 case SNK_TRANSITION_SINK_VBUS:
2968 case SRC_TRANSITION_SUPPLY:
2969 tcpm_set_state(port, HARD_RESET_SEND, 0);
2972 if (!tcpm_ams_interruptible(port)) {
2973 tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
2974 SRC_SOFT_RESET_WAIT_SNK_TX :
2978 /* process the Message 6.8.1 */
2979 port->upcoming_state = state;
2980 port->next_ams = ams;
2981 tcpm_set_state(port, ready_state(port), delay_ms);
2987 static void tcpm_pd_handle_msg(struct tcpm_port *port,
2988 enum pd_msg_request message,
2991 switch (port->state) {
2995 tcpm_queue_message(port, message);
2997 /* PD 3.0 Spec 8.3.3.4.1.1 and 6.8.1 */
2998 case SNK_TRANSITION_SINK:
2999 case SNK_TRANSITION_SINK_VBUS:
3000 case SRC_TRANSITION_SUPPLY:
3001 tcpm_set_state(port, HARD_RESET_SEND, 0);
3004 if (!tcpm_ams_interruptible(port)) {
3005 tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
3006 SRC_SOFT_RESET_WAIT_SNK_TX :
3010 port->next_ams = ams;
3011 tcpm_set_state(port, ready_state(port), 0);
3012 /* 6.8.1 process the Message */
3013 tcpm_queue_message(port, message);
3019 static int tcpm_register_source_caps(struct tcpm_port *port)
3021 struct usb_power_delivery_desc desc = { port->negotiated_rev };
3022 struct usb_power_delivery_capabilities_desc caps = { };
3023 struct usb_power_delivery_capabilities *cap = port->partner_source_caps;
3025 if (!port->partner_pd)
3026 port->partner_pd = usb_power_delivery_register(NULL, &desc);
3027 if (IS_ERR(port->partner_pd))
3028 return PTR_ERR(port->partner_pd);
3030 memcpy(caps.pdo, port->source_caps, sizeof(u32) * port->nr_source_caps);
3031 caps.role = TYPEC_SOURCE;
3034 usb_power_delivery_unregister_capabilities(cap);
3035 port->partner_source_caps = NULL;
3038 cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
3040 return PTR_ERR(cap);
3042 port->partner_source_caps = cap;
3047 static int tcpm_register_sink_caps(struct tcpm_port *port)
3049 struct usb_power_delivery_desc desc = { port->negotiated_rev };
3050 struct usb_power_delivery_capabilities_desc caps = { };
3051 struct usb_power_delivery_capabilities *cap;
3053 if (!port->partner_pd)
3054 port->partner_pd = usb_power_delivery_register(NULL, &desc);
3055 if (IS_ERR(port->partner_pd))
3056 return PTR_ERR(port->partner_pd);
3058 memcpy(caps.pdo, port->sink_caps, sizeof(u32) * port->nr_sink_caps);
3059 caps.role = TYPEC_SINK;
3061 cap = usb_power_delivery_register_capabilities(port->partner_pd, &caps);
3063 return PTR_ERR(cap);
3065 port->partner_sink_caps = cap;
3070 static void tcpm_pd_data_request(struct tcpm_port *port,
3071 const struct pd_message *msg,
3072 enum tcpm_transmit_type rx_sop_type)
3074 enum pd_data_msg_type type = pd_header_type_le(msg->header);
3075 unsigned int cnt = pd_header_cnt_le(msg->header);
3076 unsigned int rev = pd_header_rev_le(msg->header);
3078 enum frs_typec_current partner_frs_current;
3082 if (tcpm_vdm_ams(port) && type != PD_DATA_VENDOR_DEF) {
3083 port->vdm_state = VDM_STATE_ERR_BUSY;
3084 tcpm_ams_finish(port);
3085 mod_vdm_delayed_work(port, 0);
3089 case PD_DATA_SOURCE_CAP:
3090 for (i = 0; i < cnt; i++)
3091 port->source_caps[i] = le32_to_cpu(msg->payload[i]);
3093 port->nr_source_caps = cnt;
3095 tcpm_log_source_caps(port);
3097 tcpm_validate_caps(port, port->source_caps,
3098 port->nr_source_caps);
3100 tcpm_register_source_caps(port);
3103 * Adjust revision in subsequent message headers, as required,
3104 * to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't
3105 * support Rev 1.0 so just do nothing in that scenario.
3107 if (rev == PD_REV10) {
3108 if (port->ams == GET_SOURCE_CAPABILITIES)
3109 tcpm_ams_finish(port);
3113 if (rev < PD_MAX_REV) {
3114 port->negotiated_rev = rev;
3115 if (port->negotiated_rev_prime > port->negotiated_rev)
3116 port->negotiated_rev_prime = port->negotiated_rev;
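/*
 * Example (editor's illustration): if we started out at PD_MAX_REV (Rev 3.0
 * at the time of writing) and the partner's Source_Capabilities header
 * carries revision 2.0, negotiated_rev drops to 2.0 here, and the SOP'
 * revision is clamped so it never exceeds the revision negotiated on SOP.
 */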
3119 if (port->pwr_role == TYPEC_SOURCE) {
3120 if (port->ams == GET_SOURCE_CAPABILITIES)
3121 tcpm_pd_handle_state(port, SRC_READY, NONE_AMS, 0);
3122 /* Unexpected Source Capabilities */
3124 tcpm_pd_handle_msg(port,
3125 port->negotiated_rev < PD_REV30 ?
3126 PD_MSG_CTRL_REJECT :
3127 PD_MSG_CTRL_NOT_SUPP,
3129 } else if (port->state == SNK_WAIT_CAPABILITIES ||
3130 port->state == SNK_WAIT_CAPABILITIES_TIMEOUT) {
3132 * This message may be received even if VBUS is not
3133 * present. This is quite unexpected; see USB PD
3134 * specification, sections 8.3.3.6.3.1 and 8.3.3.6.3.2.
3135 * However, at the same time, we must be ready to
3136 * receive this message and respond to it 15ms after
3137 * receiving PS_RDY during power swap operations, no matter
3138 * if VBUS is available or not (USB PD specification,
3140 * So we need to accept the message either way,
3141 * but be prepared to keep waiting for VBUS after it was handled.
3144 port->ams = POWER_NEGOTIATION;
3145 port->in_ams = true;
3146 tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
3148 if (port->ams == GET_SOURCE_CAPABILITIES)
3149 tcpm_ams_finish(port);
3150 tcpm_pd_handle_state(port, SNK_NEGOTIATE_CAPABILITIES,
3151 POWER_NEGOTIATION, 0);
3154 case PD_DATA_REQUEST:
3156 * Adjust revision in subsequent message headers, as required,
3157 * to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't
3158 * support Rev 1.0 so just reject in that scenario.
3160 if (rev == PD_REV10) {
3161 tcpm_pd_handle_msg(port,
3162 port->negotiated_rev < PD_REV30 ?
3163 PD_MSG_CTRL_REJECT :
3164 PD_MSG_CTRL_NOT_SUPP,
3169 if (rev < PD_MAX_REV) {
3170 port->negotiated_rev = rev;
3171 if (port->negotiated_rev_prime > port->negotiated_rev)
3172 port->negotiated_rev_prime = port->negotiated_rev;
3175 if (port->pwr_role != TYPEC_SOURCE || cnt != 1) {
3176 tcpm_pd_handle_msg(port,
3177 port->negotiated_rev < PD_REV30 ?
3178 PD_MSG_CTRL_REJECT :
3179 PD_MSG_CTRL_NOT_SUPP,
3184 port->sink_request = le32_to_cpu(msg->payload[0]);
3186 if (port->vdm_sm_running && port->explicit_contract) {
3187 tcpm_pd_handle_msg(port, PD_MSG_CTRL_WAIT, port->ams);
3191 if (port->state == SRC_SEND_CAPABILITIES)
3192 tcpm_set_state(port, SRC_NEGOTIATE_CAPABILITIES, 0);
3194 tcpm_pd_handle_state(port, SRC_NEGOTIATE_CAPABILITIES,
3195 POWER_NEGOTIATION, 0);
3197 case PD_DATA_SINK_CAP:
3198 /* We don't do anything with this at the moment... */
3199 for (i = 0; i < cnt; i++)
3200 port->sink_caps[i] = le32_to_cpu(msg->payload[i]);
3202 partner_frs_current = (port->sink_caps[0] & PDO_FIXED_FRS_CURR_MASK) >>
3203 PDO_FIXED_FRS_CURR_SHIFT;
3204 frs_enable = partner_frs_current && (partner_frs_current <=
3205 port->new_source_frs_current);
3207 "Port partner FRS capable partner_frs_current:%u port_frs_current:%u enable:%c",
3208 partner_frs_current, port->new_source_frs_current, frs_enable ? 'y' : 'n');
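/*
 * Editor's note: partner_frs_current is the Fast Role Swap current level
 * encoded in the partner's first sink PDO (0 means FRS not supported), so
 * FRS is only enabled when the partner advertises a level and it does not
 * exceed new_source_frs_current, the level this port can supply as the new
 * source.
 */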
3210 ret = port->tcpc->enable_frs(port->tcpc, true);
3211 tcpm_log(port, "Enable FRS %s, ret:%d\n", ret ? "fail" : "success", ret);
3214 port->nr_sink_caps = cnt;
3215 port->sink_cap_done = true;
3216 tcpm_register_sink_caps(port);
3218 if (port->ams == GET_SINK_CAPABILITIES)
3219 tcpm_set_state(port, ready_state(port), 0);
3220 /* Unexpected Sink Capabilities */
3222 tcpm_pd_handle_msg(port,
3223 port->negotiated_rev < PD_REV30 ?
3224 PD_MSG_CTRL_REJECT :
3225 PD_MSG_CTRL_NOT_SUPP,
3228 case PD_DATA_VENDOR_DEF:
3229 tcpm_handle_vdm_request(port, msg->payload, cnt, rx_sop_type);
3232 port->bist_request = le32_to_cpu(msg->payload[0]);
3233 tcpm_pd_handle_state(port, BIST_RX, BIST, 0);
3236 if (port->state != SRC_READY && port->state != SNK_READY)
3237 tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
3238 SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
3241 tcpm_handle_alert(port, msg->payload, cnt);
3243 case PD_DATA_BATT_STATUS:
3244 case PD_DATA_GET_COUNTRY_INFO:
3245 /* Currently unsupported */
3246 tcpm_pd_handle_msg(port, port->negotiated_rev < PD_REV30 ?
3247 PD_MSG_CTRL_REJECT :
3248 PD_MSG_CTRL_NOT_SUPP,
3252 tcpm_pd_handle_msg(port, port->negotiated_rev < PD_REV30 ?
3253 PD_MSG_CTRL_REJECT :
3254 PD_MSG_CTRL_NOT_SUPP,
3256 tcpm_log(port, "Unrecognized data message type %#x", type);
3261 static void tcpm_pps_complete(struct tcpm_port *port, int result)
3263 if (port->pps_pending) {
3264 port->pps_status = result;
3265 port->pps_pending = false;
3266 complete(&port->pps_complete);
3270 static void tcpm_pd_ctrl_request(struct tcpm_port *port,
3271 const struct pd_message *msg,
3272 enum tcpm_transmit_type rx_sop_type)
3274 enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
3275 enum tcpm_state next_state;
3276 unsigned int rev = pd_header_rev_le(msg->header);
3279 * Stop the VDM state machine if interrupted by other Messages. NOT_SUPP is allowed in
3280 * a VDM AMS while waiting for VDM responses and will be handled later.
3282 if (tcpm_vdm_ams(port) && type != PD_CTRL_NOT_SUPP && type != PD_CTRL_GOOD_CRC) {
3283 port->vdm_state = VDM_STATE_ERR_BUSY;
3284 tcpm_ams_finish(port);
3285 mod_vdm_delayed_work(port, 0);
3289 case PD_CTRL_GOOD_CRC:
3292 case PD_CTRL_GET_SOURCE_CAP:
3293 tcpm_pd_handle_msg(port, PD_MSG_DATA_SOURCE_CAP, GET_SOURCE_CAPABILITIES);
3295 case PD_CTRL_GET_SINK_CAP:
3296 tcpm_pd_handle_msg(port, PD_MSG_DATA_SINK_CAP, GET_SINK_CAPABILITIES);
3298 case PD_CTRL_GOTO_MIN:
3300 case PD_CTRL_PS_RDY:
3301 switch (port->state) {
3302 case SNK_TRANSITION_SINK:
3303 if (port->vbus_present) {
3304 tcpm_set_current_limit(port,
3305 port->req_current_limit,
3306 port->req_supply_voltage);
3307 port->explicit_contract = true;
3308 tcpm_set_auto_vbus_discharge_threshold(port,
3310 port->pps_data.active,
3311 port->supply_voltage);
3312 tcpm_set_state(port, SNK_READY, 0);
3315 * Seen after power swap. Keep waiting for VBUS
3316 * in a transitional state.
3318 tcpm_set_state(port,
3319 SNK_TRANSITION_SINK_VBUS, 0);
3322 case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
3323 tcpm_set_state(port, PR_SWAP_SRC_SNK_SINK_ON, 0);
3325 case PR_SWAP_SNK_SRC_SINK_OFF:
3326 tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON, 0);
3328 case VCONN_SWAP_WAIT_FOR_VCONN:
3329 tcpm_set_state(port, VCONN_SWAP_TURN_OFF_VCONN, 0);
3331 case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
3332 tcpm_set_state(port, FR_SWAP_SNK_SRC_NEW_SINK_READY, 0);
3335 tcpm_pd_handle_state(port,
3336 port->pwr_role == TYPEC_SOURCE ?
3337 SRC_SOFT_RESET_WAIT_SNK_TX :
3343 case PD_CTRL_REJECT:
3345 case PD_CTRL_NOT_SUPP:
3346 switch (port->state) {
3347 case SNK_NEGOTIATE_CAPABILITIES:
3348 /* USB PD specification, Figure 8-43 */
3349 if (port->explicit_contract)
3350 next_state = SNK_READY;
3352 next_state = SNK_WAIT_CAPABILITIES;
3354 /* Threshold was relaxed before sending Request. Restore it back. */
3355 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
3356 port->pps_data.active,
3357 port->supply_voltage);
3358 tcpm_set_state(port, next_state, 0);
3360 case SNK_NEGOTIATE_PPS_CAPABILITIES:
3361 /* Revert data back from any requested PPS updates */
3362 port->pps_data.req_out_volt = port->supply_voltage;
3363 port->pps_data.req_op_curr = port->current_limit;
3364 port->pps_status = (type == PD_CTRL_WAIT ?
3365 -EAGAIN : -EOPNOTSUPP);
3367 /* Threshold was relaxed before sending Request. Restore it back. */
3368 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
3369 port->pps_data.active,
3370 port->supply_voltage);
3372 tcpm_set_state(port, SNK_READY, 0);
3375 port->swap_status = (type == PD_CTRL_WAIT ?
3376 -EAGAIN : -EOPNOTSUPP);
3377 tcpm_set_state(port, DR_SWAP_CANCEL, 0);
3380 port->swap_status = (type == PD_CTRL_WAIT ?
3381 -EAGAIN : -EOPNOTSUPP);
3382 tcpm_set_state(port, PR_SWAP_CANCEL, 0);
3384 case VCONN_SWAP_SEND:
3385 port->swap_status = (type == PD_CTRL_WAIT ?
3386 -EAGAIN : -EOPNOTSUPP);
3387 tcpm_set_state(port, VCONN_SWAP_CANCEL, 0);
3390 tcpm_set_state(port, FR_SWAP_CANCEL, 0);
3393 port->sink_cap_done = true;
3394 tcpm_set_state(port, ready_state(port), 0);
3397 * Some port partners do not support GET_STATUS; avoid soft-resetting the link to
3398 * prevent redundant power re-negotiation
3400 case GET_STATUS_SEND:
3401 tcpm_set_state(port, ready_state(port), 0);
3405 if (port->vdm_state > VDM_STATE_READY) {
3406 port->vdm_state = VDM_STATE_DONE;
3407 if (tcpm_vdm_ams(port))
3408 tcpm_ams_finish(port);
3409 mod_vdm_delayed_work(port, 0);
3414 tcpm_pd_handle_state(port,
3415 port->pwr_role == TYPEC_SOURCE ?
3416 SRC_SOFT_RESET_WAIT_SNK_TX :
3422 case PD_CTRL_ACCEPT:
3423 switch (port->state) {
3424 case SNK_NEGOTIATE_CAPABILITIES:
3425 port->pps_data.active = false;
3426 tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
3428 case SNK_NEGOTIATE_PPS_CAPABILITIES:
3429 port->pps_data.active = true;
3430 port->pps_data.min_volt = port->pps_data.req_min_volt;
3431 port->pps_data.max_volt = port->pps_data.req_max_volt;
3432 port->pps_data.max_curr = port->pps_data.req_max_curr;
3433 port->req_supply_voltage = port->pps_data.req_out_volt;
3434 port->req_current_limit = port->pps_data.req_op_curr;
3435 power_supply_changed(port->psy);
3436 tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
3438 case SOFT_RESET_SEND:
3439 if (port->ams == SOFT_RESET_AMS)
3440 tcpm_ams_finish(port);
3442 * SOP' Soft Reset is done after Vconn Swap,
3443 * which returns to ready state
3445 if (rx_sop_type == TCPC_TX_SOP_PRIME) {
3446 if (rev < port->negotiated_rev_prime)
3447 port->negotiated_rev_prime = rev;
3448 tcpm_set_state(port, ready_state(port), 0);
3451 if (port->pwr_role == TYPEC_SOURCE) {
3452 port->upcoming_state = SRC_SEND_CAPABILITIES;
3453 tcpm_ams_start(port, POWER_NEGOTIATION);
3455 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
3459 tcpm_set_state(port, DR_SWAP_CHANGE_DR, 0);
3462 tcpm_set_state(port, PR_SWAP_START, 0);
3464 case VCONN_SWAP_SEND:
3465 tcpm_set_state(port, VCONN_SWAP_START, 0);
3468 tcpm_set_state(port, FR_SWAP_SNK_SRC_TRANSITION_TO_OFF, 0);
3471 tcpm_pd_handle_state(port,
3472 port->pwr_role == TYPEC_SOURCE ?
3473 SRC_SOFT_RESET_WAIT_SNK_TX :
3479 case PD_CTRL_SOFT_RESET:
3480 port->ams = SOFT_RESET_AMS;
3481 tcpm_set_state(port, SOFT_RESET, 0);
3483 case PD_CTRL_DR_SWAP:
3486 * 6.3.9: If an alternate mode is active, a request to swap
3487 * alternate modes shall trigger a port reset.
3489 if (port->typec_caps.data != TYPEC_PORT_DRD) {
3490 tcpm_pd_handle_msg(port,
3491 port->negotiated_rev < PD_REV30 ?
3492 PD_MSG_CTRL_REJECT :
3493 PD_MSG_CTRL_NOT_SUPP,
3496 if (port->send_discover && port->negotiated_rev < PD_REV30) {
3497 tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
3501 tcpm_pd_handle_state(port, DR_SWAP_ACCEPT, DATA_ROLE_SWAP, 0);
3504 case PD_CTRL_PR_SWAP:
3505 if (port->port_type != TYPEC_PORT_DRP) {
3506 tcpm_pd_handle_msg(port,
3507 port->negotiated_rev < PD_REV30 ?
3508 PD_MSG_CTRL_REJECT :
3509 PD_MSG_CTRL_NOT_SUPP,
3512 if (port->send_discover && port->negotiated_rev < PD_REV30) {
3513 tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
3517 tcpm_pd_handle_state(port, PR_SWAP_ACCEPT, POWER_ROLE_SWAP, 0);
3520 case PD_CTRL_VCONN_SWAP:
3521 if (port->send_discover && port->negotiated_rev < PD_REV30) {
3522 tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
3526 tcpm_pd_handle_state(port, VCONN_SWAP_ACCEPT, VCONN_SWAP, 0);
3528 case PD_CTRL_GET_SOURCE_CAP_EXT:
3529 case PD_CTRL_GET_STATUS:
3530 case PD_CTRL_FR_SWAP:
3531 case PD_CTRL_GET_PPS_STATUS:
3532 case PD_CTRL_GET_COUNTRY_CODES:
3533 /* Currently not supported */
3534 tcpm_pd_handle_msg(port,
3535 port->negotiated_rev < PD_REV30 ?
3536 PD_MSG_CTRL_REJECT :
3537 PD_MSG_CTRL_NOT_SUPP,
3541 tcpm_pd_handle_msg(port,
3542 port->negotiated_rev < PD_REV30 ?
3543 PD_MSG_CTRL_REJECT :
3544 PD_MSG_CTRL_NOT_SUPP,
3546 tcpm_log(port, "Unrecognized ctrl message type %#x", type);
3551 static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
3552 const struct pd_message *msg)
3554 enum pd_ext_msg_type type = pd_header_type_le(msg->header);
3555 unsigned int data_size = pd_ext_header_data_size_le(msg->ext_msg.header);
3557 /* Stop the VDM state machine if interrupted by other Messages */
3558 if (tcpm_vdm_ams(port)) {
3559 port->vdm_state = VDM_STATE_ERR_BUSY;
3560 tcpm_ams_finish(port);
3561 mod_vdm_delayed_work(port, 0);
3564 if (!(le16_to_cpu(msg->ext_msg.header) & PD_EXT_HDR_CHUNKED)) {
3565 tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
3566 tcpm_log(port, "Unchunked extended messages unsupported");
3570 if (data_size > PD_EXT_MAX_CHUNK_DATA) {
3571 tcpm_pd_handle_state(port, CHUNK_NOT_SUPP, NONE_AMS, PD_T_CHUNK_NOT_SUPP);
3572 tcpm_log(port, "Chunk handling not yet supported");
3578 case PD_EXT_PPS_STATUS:
3579 if (port->ams == GETTING_SOURCE_SINK_STATUS) {
3580 tcpm_ams_finish(port);
3581 tcpm_set_state(port, ready_state(port), 0);
3583 /* unexpected Status or PPS_Status Message */
3584 tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
3585 SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
3589 case PD_EXT_SOURCE_CAP_EXT:
3590 case PD_EXT_GET_BATT_CAP:
3591 case PD_EXT_GET_BATT_STATUS:
3592 case PD_EXT_BATT_CAP:
3593 case PD_EXT_GET_MANUFACTURER_INFO:
3594 case PD_EXT_MANUFACTURER_INFO:
3595 case PD_EXT_SECURITY_REQUEST:
3596 case PD_EXT_SECURITY_RESPONSE:
3597 case PD_EXT_FW_UPDATE_REQUEST:
3598 case PD_EXT_FW_UPDATE_RESPONSE:
3599 case PD_EXT_COUNTRY_INFO:
3600 case PD_EXT_COUNTRY_CODES:
3601 tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
3604 tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
3605 tcpm_log(port, "Unrecognized extended message type %#x", type);
3610 static void tcpm_pd_rx_handler(struct kthread_work *work)
3612 struct pd_rx_event *event = container_of(work,
3613 struct pd_rx_event, work);
3614 const struct pd_message *msg = &event->msg;
3615 unsigned int cnt = pd_header_cnt_le(msg->header);
3616 struct tcpm_port *port = event->port;
3617 enum tcpm_transmit_type rx_sop_type = event->rx_sop_type;
3619 mutex_lock(&port->lock);
3621 tcpm_log(port, "PD RX, header: %#x [%d]", le16_to_cpu(msg->header),
3624 if (port->attached) {
3625 enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
3626 unsigned int msgid = pd_header_msgid_le(msg->header);
3629 * Drop SOP' messages if we cannot communicate over SOP', as
3630 * reported by tcpm_can_communicate_sop_prime()
3632 if (rx_sop_type == TCPC_TX_SOP_PRIME &&
3633 !tcpm_can_communicate_sop_prime(port))
3637 * USB PD standard, 6.6.1.2:
3638 * "... if MessageID value in a received Message is the
3639 * same as the stored value, the receiver shall return a
3640 * GoodCRC Message with that MessageID value and drop
3641 * the Message (this is a retry of an already received
3642 * Message). Note: this shall not apply to the Soft_Reset
3643 * Message which always has a MessageID value of zero."
3645 switch (rx_sop_type) {
3646 case TCPC_TX_SOP_PRIME:
3647 if (msgid == port->rx_msgid_prime)
3649 port->rx_msgid_prime = msgid;
3653 if (msgid == port->rx_msgid && type != PD_CTRL_SOFT_RESET)
3655 port->rx_msgid = msgid;
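/*
 * Note (editor's addition): SOP and SOP' maintain independent MessageID
 * counters, which is why rx_msgid and rx_msgid_prime are tracked
 * separately; a retry on one channel must not be mistaken for a retry on
 * the other.
 */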
3660 * If both ends believe they are DFP/host, we have a data role mismatch.
3663 if (!!(le16_to_cpu(msg->header) & PD_HEADER_DATA_ROLE) ==
3664 (port->data_role == TYPEC_HOST) && rx_sop_type == TCPC_TX_SOP) {
3666 "Data role mismatch, initiating error recovery");
3667 tcpm_set_state(port, ERROR_RECOVERY, 0);
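/*
 * Editor's note on the check above: PD_HEADER_DATA_ROLE carries the sender's
 * claimed data role (bit set = DFP). If that claim matches our own view of
 * the data role on SOP traffic, both ends are asserting the same role, and
 * only error recovery can untangle the connection.
 */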
3669 if (le16_to_cpu(msg->header) & PD_HEADER_EXT_HDR)
3670 tcpm_pd_ext_msg_request(port, msg);
3672 tcpm_pd_data_request(port, msg, rx_sop_type);
3674 tcpm_pd_ctrl_request(port, msg, rx_sop_type);
3679 mutex_unlock(&port->lock);
3683 void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg,
3684 enum tcpm_transmit_type rx_sop_type)
3686 struct pd_rx_event *event;
3688 event = kzalloc(sizeof(*event), GFP_ATOMIC);
3692 kthread_init_work(&event->work, tcpm_pd_rx_handler);
3694 event->rx_sop_type = rx_sop_type;
3695 memcpy(&event->msg, msg, sizeof(*msg));
3696 kthread_queue_work(port->wq, &event->work);
3698 EXPORT_SYMBOL_GPL(tcpm_pd_receive);
3700 static int tcpm_pd_send_control(struct tcpm_port *port,
3701 enum pd_ctrl_msg_type type,
3702 enum tcpm_transmit_type tx_sop_type)
3704 struct pd_message msg;
3706 memset(&msg, 0, sizeof(msg));
3707 switch (tx_sop_type) {
3708 case TCPC_TX_SOP_PRIME:
3709 msg.header = PD_HEADER_LE(type,
3710 0, /* Cable Plug Indicator for DFP/UFP */
3712 port->negotiated_rev,
3713 port->message_id_prime,
3717 msg.header = PD_HEADER_LE(type,
3720 port->negotiated_rev,
3725 msg.header = PD_HEADER_LE(type,
3728 port->negotiated_rev,
3734 return tcpm_pd_transmit(port, tx_sop_type, &msg);
3738 * Send queued message without affecting state.
3739 * Return true if the state machine should go back to sleep, false otherwise.
3742 static bool tcpm_send_queued_message(struct tcpm_port *port)
3744 enum pd_msg_request queued_message;
3748 queued_message = port->queued_message;
3749 port->queued_message = PD_MSG_NONE;
3751 switch (queued_message) {
3752 case PD_MSG_CTRL_WAIT:
3753 tcpm_pd_send_control(port, PD_CTRL_WAIT, TCPC_TX_SOP);
3755 case PD_MSG_CTRL_REJECT:
3756 tcpm_pd_send_control(port, PD_CTRL_REJECT, TCPC_TX_SOP);
3758 case PD_MSG_CTRL_NOT_SUPP:
3759 tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP, TCPC_TX_SOP);
3761 case PD_MSG_DATA_SINK_CAP:
3762 ret = tcpm_pd_send_sink_caps(port);
3764 tcpm_log(port, "Unable to send snk caps, ret=%d", ret);
3765 tcpm_set_state(port, SNK_SOFT_RESET, 0);
3767 tcpm_ams_finish(port);
3769 case PD_MSG_DATA_SOURCE_CAP:
3770 ret = tcpm_pd_send_source_caps(port);
3773 "Unable to send src caps, ret=%d",
3775 tcpm_set_state(port, SOFT_RESET_SEND, 0);
3776 } else if (port->pwr_role == TYPEC_SOURCE) {
3777 tcpm_ams_finish(port);
3778 tcpm_set_state(port, HARD_RESET_SEND,
3779 PD_T_SENDER_RESPONSE);
3781 tcpm_ams_finish(port);
3787 } while (port->queued_message != PD_MSG_NONE);
3789 if (port->delayed_state != INVALID_STATE) {
3790 if (ktime_after(port->delayed_runtime, ktime_get())) {
3791 mod_tcpm_delayed_work(port, ktime_to_ms(ktime_sub(port->delayed_runtime,
3795 port->delayed_state = INVALID_STATE;
3800 static int tcpm_pd_check_request(struct tcpm_port *port)
3802 u32 pdo, rdo = port->sink_request;
3803 unsigned int max, op, pdo_max, index;
3804 enum pd_pdo_type type;
3806 index = rdo_index(rdo);
3807 if (!index || index > port->nr_src_pdo)
3810 pdo = port->src_pdo[index - 1];
3811 type = pdo_type(pdo);
3813 case PDO_TYPE_FIXED:
3815 max = rdo_max_current(rdo);
3816 op = rdo_op_current(rdo);
3817 pdo_max = pdo_max_current(pdo);
3821 if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
3824 if (type == PDO_TYPE_FIXED)
3826 "Requested %u mV, %u mA for %u / %u mA",
3827 pdo_fixed_voltage(pdo), pdo_max, op, max);
3830 "Requested %u -> %u mV, %u mA for %u / %u mA",
3831 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
3835 max = rdo_max_power(rdo);
3836 op = rdo_op_power(rdo);
3837 pdo_max = pdo_max_power(pdo);
3841 if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
3844 "Requested %u -> %u mV, %u mW for %u / %u mW",
3845 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
3852 port->op_vsafe5v = index == 1;
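/*
 * Worked example (editor's illustration): with src_pdo[0] being a Fixed
 * 5V/3A PDO, an RDO selecting object position 1 with a 3000 mA maximum
 * passes the check above, while a 4000 mA maximum without RDO_CAP_MISMATCH
 * is rejected for exceeding the 3000 mA the PDO offers. op_vsafe5v records
 * whether the first (vSafe5V) object was chosen.
 */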
3857 #define min_power(x, y) min(pdo_max_power(x), pdo_max_power(y))
3858 #define min_current(x, y) min(pdo_max_current(x), pdo_max_current(y))
3860 static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo,
3863 unsigned int i, j, max_src_mv = 0, min_src_mv = 0, max_mw = 0,
3864 max_mv = 0, src_mw = 0, src_ma = 0, max_snk_mv = 0,
3868 port->pps_data.supported = false;
3869 port->usb_type = POWER_SUPPLY_USB_TYPE_PD;
3870 power_supply_changed(port->psy);
3873 * Select the source PDO providing the most power which has a matching sink PDO.
3876 for (i = 0; i < port->nr_source_caps; i++) {
3877 u32 pdo = port->source_caps[i];
3878 enum pd_pdo_type type = pdo_type(pdo);
3881 case PDO_TYPE_FIXED:
3882 max_src_mv = pdo_fixed_voltage(pdo);
3883 min_src_mv = max_src_mv;
3887 max_src_mv = pdo_max_voltage(pdo);
3888 min_src_mv = pdo_min_voltage(pdo);
3891 if (pdo_apdo_type(pdo) == APDO_TYPE_PPS) {
3892 port->pps_data.supported = true;
3894 POWER_SUPPLY_USB_TYPE_PD_PPS;
3895 power_supply_changed(port->psy);
3899 tcpm_log(port, "Invalid source PDO type, ignoring");
3904 case PDO_TYPE_FIXED:
3906 src_ma = pdo_max_current(pdo);
3907 src_mw = src_ma * min_src_mv / 1000;
3910 src_mw = pdo_max_power(pdo);
3915 tcpm_log(port, "Invalid source PDO type, ignoring");
3919 for (j = 0; j < port->nr_snk_pdo; j++) {
3920 pdo = port->snk_pdo[j];
3922 switch (pdo_type(pdo)) {
3923 case PDO_TYPE_FIXED:
3924 max_snk_mv = pdo_fixed_voltage(pdo);
3925 min_snk_mv = max_snk_mv;
3929 max_snk_mv = pdo_max_voltage(pdo);
3930 min_snk_mv = pdo_min_voltage(pdo);
3935 tcpm_log(port, "Invalid sink PDO type, ignoring");
3939 if (max_src_mv <= max_snk_mv &&
3940 min_src_mv >= min_snk_mv) {
3941 /* Prefer higher voltages if available */
3942 if ((src_mw == max_mw && min_src_mv > max_mv) ||
3947 max_mv = min_src_mv;
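/*
 * Worked example (editor's illustration): given source PDOs Fixed 5V/3A
 * (15 W) and Fixed 9V/3A (27 W) and a sink table covering both voltages,
 * the 9 V PDO wins on src_mw; if two candidates tie on power, the one with
 * the higher minimum voltage is preferred per the check above.
 */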
3957 static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
3959 unsigned int i, src_ma, max_temp_mw = 0, max_op_ma, op_mw;
3960 unsigned int src_pdo = 0;
3963 for (i = 1; i < port->nr_source_caps; ++i) {
3964 pdo = port->source_caps[i];
3966 switch (pdo_type(pdo)) {
3968 if (pdo_apdo_type(pdo) != APDO_TYPE_PPS) {
3969 tcpm_log(port, "Not PPS APDO (source), ignoring");
3973 if (port->pps_data.req_out_volt > pdo_pps_apdo_max_voltage(pdo) ||
3974 port->pps_data.req_out_volt < pdo_pps_apdo_min_voltage(pdo))
3977 src_ma = pdo_pps_apdo_max_current(pdo);
3978 max_op_ma = min(src_ma, port->pps_data.req_op_curr);
3979 op_mw = max_op_ma * port->pps_data.req_out_volt / 1000;
3980 if (op_mw > max_temp_mw) {
3982 max_temp_mw = op_mw;
3986 tcpm_log(port, "Not APDO type (source), ignoring");
3992 src = port->source_caps[src_pdo];
3994 port->pps_data.req_min_volt = pdo_pps_apdo_min_voltage(src);
3995 port->pps_data.req_max_volt = pdo_pps_apdo_max_voltage(src);
3996 port->pps_data.req_max_curr = pdo_pps_apdo_max_current(src);
3997 port->pps_data.req_op_curr = min(port->pps_data.req_max_curr,
3998 port->pps_data.req_op_curr);
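/*
 * Worked example (editor's illustration): with req_out_volt = 9000 mV,
 * req_op_curr = 3000 mA and a single PPS APDO of 3.3-11 V / 5 A, the APDO is
 * in range, max_op_ma = min(5000, 3000) = 3000 mA and op_mw = 27000 mW, so
 * it is selected; the requested operating current is then clamped to the
 * APDO's maximum current.
 */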
4004 static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
4006 unsigned int mv, ma, mw, flags;
4007 unsigned int max_ma, max_mw;
4008 enum pd_pdo_type type;
4009 u32 pdo, matching_snk_pdo;
4010 int src_pdo_index = 0;
4011 int snk_pdo_index = 0;
4014 ret = tcpm_pd_select_pdo(port, &snk_pdo_index, &src_pdo_index);
4018 pdo = port->source_caps[src_pdo_index];
4019 matching_snk_pdo = port->snk_pdo[snk_pdo_index];
4020 type = pdo_type(pdo);
4023 case PDO_TYPE_FIXED:
4024 mv = pdo_fixed_voltage(pdo);
4028 mv = pdo_min_voltage(pdo);
4031 tcpm_log(port, "Invalid PDO selected!");
4035 /* Select maximum available current within the sink pdo's limit */
4036 if (type == PDO_TYPE_BATT) {
4037 mw = min_power(pdo, matching_snk_pdo);
4038 ma = 1000 * mw / mv;
4040 ma = min_current(pdo, matching_snk_pdo);
4041 mw = ma * mv / 1000;
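/*
 * Worked example (editor's illustration): matching a Fixed 9V/3A source PDO
 * against a Fixed 9V/2A sink PDO gives ma = min(3000, 2000) = 2000 mA and
 * mw = 2000 * 9000 / 1000 = 18000 mW, which is then compared against
 * operating_snk_mw to decide whether RDO_CAP_MISMATCH must be set.
 */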
4044 flags = RDO_USB_COMM | RDO_NO_SUSPEND;
4046 /* Set mismatch bit if offered power is less than operating power */
4049 if (mw < port->operating_snk_mw) {
4050 flags |= RDO_CAP_MISMATCH;
4051 if (type == PDO_TYPE_BATT &&
4052 (pdo_max_power(matching_snk_pdo) > pdo_max_power(pdo)))
4053 max_mw = pdo_max_power(matching_snk_pdo);
4054 else if (pdo_max_current(matching_snk_pdo) >
4055 pdo_max_current(pdo))
4056 max_ma = pdo_max_current(matching_snk_pdo);
4059 tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
4060 port->cc_req, port->cc1, port->cc2, port->vbus_source,
4061 port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
4064 if (type == PDO_TYPE_BATT) {
4065 *rdo = RDO_BATT(src_pdo_index + 1, mw, max_mw, flags);
4067 tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s",
4068 src_pdo_index, mv, mw,
4069 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
4071 *rdo = RDO_FIXED(src_pdo_index + 1, ma, max_ma, flags);
4073 tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s",
4074 src_pdo_index, mv, ma,
4075 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
4078 port->req_current_limit = ma;
4079 port->req_supply_voltage = mv;
4084 static int tcpm_pd_send_request(struct tcpm_port *port)
4086 struct pd_message msg;
4090 ret = tcpm_pd_build_request(port, &rdo);
4095 * Relax the threshold as voltage will be adjusted after Accept Message plus tSrcTransition.
4096 * It is safer to modify the threshold here.
4098 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
4100 memset(&msg, 0, sizeof(msg));
4101 msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
4104 port->negotiated_rev,
4105 port->message_id, 1);
4106 msg.payload[0] = cpu_to_le32(rdo);
4108 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
4111 static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
4113 unsigned int out_mv, op_ma, op_mw, max_mv, max_ma, flags;
4114 unsigned int src_pdo_index;
4116 src_pdo_index = tcpm_pd_select_pps_apdo(port);
4120 max_mv = port->pps_data.req_max_volt;
4121 max_ma = port->pps_data.req_max_curr;
4122 out_mv = port->pps_data.req_out_volt;
4123 op_ma = port->pps_data.req_op_curr;
4125 flags = RDO_USB_COMM | RDO_NO_SUSPEND;
4127 op_mw = (op_ma * out_mv) / 1000;
4128 if (op_mw < port->operating_snk_mw) {
4130 * Try raising current to meet power needs. If that's not enough
4131 * then try upping the voltage. If that's still not enough
4132 * then we've obviously chosen a PPS APDO which really isn't
4133 * suitable so abandon ship.
4135 op_ma = (port->operating_snk_mw * 1000) / out_mv;
4136 if ((port->operating_snk_mw * 1000) % out_mv)
4138 op_ma += RDO_PROG_CURR_MA_STEP - (op_ma % RDO_PROG_CURR_MA_STEP);
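/*
 * Worked example (editor's illustration, assuming the usual 50 mA
 * RDO_PROG_CURR_MA_STEP): operating_snk_mw = 7600 mW at out_mv = 5000 mV
 * gives op_ma = 1520 mA, which is rounded up to the next step, i.e. 1550 mA,
 * before being checked against the APDO's max_ma.
 */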
4140 if (op_ma > max_ma) {
4142 out_mv = (port->operating_snk_mw * 1000) / op_ma;
4143 if ((port->operating_snk_mw * 1000) % op_ma)
4145 out_mv += RDO_PROG_VOLT_MV_STEP -
4146 (out_mv % RDO_PROG_VOLT_MV_STEP);
4148 if (out_mv > max_mv) {
4149 tcpm_log(port, "Invalid PPS APDO selected!");
4155 tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
4156 port->cc_req, port->cc1, port->cc2, port->vbus_source,
4157 port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
4160 *rdo = RDO_PROG(src_pdo_index + 1, out_mv, op_ma, flags);
4162 tcpm_log(port, "Requesting APDO %d: %u mV, %u mA",
4163 src_pdo_index, out_mv, op_ma);
4165 port->pps_data.req_op_curr = op_ma;
4166 port->pps_data.req_out_volt = out_mv;
4171 static int tcpm_pd_send_pps_request(struct tcpm_port *port)
4173 struct pd_message msg;
4177 ret = tcpm_pd_build_pps_request(port, &rdo);
4181 /* Relax the threshold as voltage will be adjusted right after Accept Message. */
4182 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
4184 memset(&msg, 0, sizeof(msg));
4185 msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
4188 port->negotiated_rev,
4189 port->message_id, 1);
4190 msg.payload[0] = cpu_to_le32(rdo);
4192 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
4195 static int tcpm_set_vbus(struct tcpm_port *port, bool enable)
4199 if (enable && port->vbus_charge)
4202 tcpm_log(port, "vbus:=%d charge=%d", enable, port->vbus_charge);
4204 ret = port->tcpc->set_vbus(port->tcpc, enable, port->vbus_charge);
4208 port->vbus_source = enable;
4212 static int tcpm_set_charge(struct tcpm_port *port, bool charge)
4216 if (charge && port->vbus_source)
4219 if (charge != port->vbus_charge) {
4220 tcpm_log(port, "vbus=%d charge:=%d", port->vbus_source, charge);
4221 ret = port->tcpc->set_vbus(port->tcpc, port->vbus_source,
4226 port->vbus_charge = charge;
4227 power_supply_changed(port->psy);
4231 static bool tcpm_start_toggling(struct tcpm_port *port, enum typec_cc_status cc)
4235 if (!port->tcpc->start_toggling)
4238 tcpm_log_force(port, "Start toggling");
4239 ret = port->tcpc->start_toggling(port->tcpc, port->port_type, cc);
4243 static int tcpm_init_vbus(struct tcpm_port *port)
4247 ret = port->tcpc->set_vbus(port->tcpc, false, false);
4248 port->vbus_source = false;
4249 port->vbus_charge = false;
4253 static int tcpm_init_vconn(struct tcpm_port *port)
4257 ret = port->tcpc->set_vconn(port->tcpc, false);
4258 port->vconn_role = TYPEC_SINK;
4262 static void tcpm_typec_connect(struct tcpm_port *port)
4264 struct typec_partner *partner;
4266 if (!port->connected) {
4267 port->connected = true;
4268 /* Make sure we don't report stale identity information */
4269 memset(&port->partner_ident, 0, sizeof(port->partner_ident));
4270 port->partner_desc.usb_pd = port->pd_capable;
4271 if (tcpm_port_is_debug(port))
4272 port->partner_desc.accessory = TYPEC_ACCESSORY_DEBUG;
4273 else if (tcpm_port_is_audio(port))
4274 port->partner_desc.accessory = TYPEC_ACCESSORY_AUDIO;
4276 port->partner_desc.accessory = TYPEC_ACCESSORY_NONE;
4277 partner = typec_register_partner(port->typec_port, &port->partner_desc);
4278 if (IS_ERR(partner)) {
4279 dev_err(port->dev, "Failed to register partner (%ld)\n", PTR_ERR(partner));
4283 port->partner = partner;
4284 typec_partner_set_usb_power_delivery(port->partner, port->partner_pd);
4288 static int tcpm_src_attach(struct tcpm_port *port)
4290 enum typec_cc_polarity polarity =
4291 port->cc2 == TYPEC_CC_RD ? TYPEC_POLARITY_CC2
4292 : TYPEC_POLARITY_CC1;
4298 ret = tcpm_set_polarity(port, polarity);
4302 tcpm_enable_auto_vbus_discharge(port, true);
4304 ret = tcpm_set_roles(port, true, TYPEC_SOURCE, tcpm_data_role_for_source(port));
4308 if (port->pd_supported) {
4309 ret = port->tcpc->set_pd_rx(port->tcpc, true);
4311 goto out_disable_mux;
4315 * USB Type-C specification, version 1.2,
4316 * chapter 4.5.2.2.8.1 (Attached.SRC Requirements)
4317 * Enable VCONN only if the non-RD port is set to RA.
4319 if ((polarity == TYPEC_POLARITY_CC1 && port->cc2 == TYPEC_CC_RA) ||
4320 (polarity == TYPEC_POLARITY_CC2 && port->cc1 == TYPEC_CC_RA)) {
4321 ret = tcpm_set_vconn(port, true);
4323 goto out_disable_pd;
4326 ret = tcpm_set_vbus(port, true);
4328 goto out_disable_vconn;
4330 port->pd_capable = false;
4332 port->partner = NULL;
4334 port->attached = true;
4335 port->send_discover = true;
4336 port->send_discover_prime = false;
4341 tcpm_set_vconn(port, false);
4343 if (port->pd_supported)
4344 port->tcpc->set_pd_rx(port->tcpc, false);
4346 tcpm_mux_set(port, TYPEC_STATE_SAFE, USB_ROLE_NONE,
4347 TYPEC_ORIENTATION_NONE);
4351 static void tcpm_typec_disconnect(struct tcpm_port *port)
4354 * Unregister plug/cable outside of port->connected because cable can
4355 * be discovered before the SRC_READY/SNK_READY states where port->connected is set.
4358 typec_unregister_plug(port->plug_prime);
4359 typec_unregister_cable(port->cable);
4360 port->plug_prime = NULL;
4362 if (port->connected) {
4363 if (port->partner) {
4364 typec_partner_set_usb_power_delivery(port->partner, NULL);
4365 typec_unregister_partner(port->partner);
4366 port->partner = NULL;
4368 port->connected = false;
4372 static void tcpm_unregister_altmodes(struct tcpm_port *port)
4374 struct pd_mode_data *modep = &port->mode_data;
4375 struct pd_mode_data *modep_prime = &port->mode_data_prime;
4378 for (i = 0; i < modep->altmodes; i++) {
4379 typec_unregister_altmode(port->partner_altmode[i]);
4380 port->partner_altmode[i] = NULL;
4382 for (i = 0; i < modep_prime->altmodes; i++) {
4383 typec_unregister_altmode(port->plug_prime_altmode[i]);
4384 port->plug_prime_altmode[i] = NULL;
4387 memset(modep, 0, sizeof(*modep));
4388 memset(modep_prime, 0, sizeof(*modep_prime));
4391 static void tcpm_set_partner_usb_comm_capable(struct tcpm_port *port, bool capable)
4393 tcpm_log(port, "Setting usb_comm capable %s", capable ? "true" : "false");
4395 if (port->tcpc->set_partner_usb_comm_capable)
4396 port->tcpc->set_partner_usb_comm_capable(port->tcpc, capable);
4399 static void tcpm_reset_port(struct tcpm_port *port)
4401 tcpm_enable_auto_vbus_discharge(port, false);
4402 port->in_ams = false;
4403 port->ams = NONE_AMS;
4404 port->vdm_sm_running = false;
4405 tcpm_unregister_altmodes(port);
4406 tcpm_typec_disconnect(port);
4407 port->attached = false;
4408 port->pd_capable = false;
4409 port->pps_data.supported = false;
4410 tcpm_set_partner_usb_comm_capable(port, false);
4413 * First Rx ID should be 0; set this to a sentinel of -1 so that
4414 * tcpm_pd_rx_handler() can check whether we have seen it before.
4416 port->rx_msgid = -1;
4417 port->rx_msgid_prime = -1;
4419 port->tcpc->set_pd_rx(port->tcpc, false);
4420 tcpm_init_vbus(port); /* also disables charging */
4421 tcpm_init_vconn(port);
4422 tcpm_set_current_limit(port, 0, 0);
4423 tcpm_set_polarity(port, TYPEC_POLARITY_CC1);
4424 tcpm_mux_set(port, TYPEC_STATE_SAFE, USB_ROLE_NONE,
4425 TYPEC_ORIENTATION_NONE);
4426 tcpm_set_attached_state(port, false);
4427 port->try_src_count = 0;
4428 port->try_snk_count = 0;
4429 port->usb_type = POWER_SUPPLY_USB_TYPE_C;
4430 power_supply_changed(port->psy);
4431 port->nr_sink_caps = 0;
4432 port->sink_cap_done = false;
4433 if (port->tcpc->enable_frs)
4434 port->tcpc->enable_frs(port->tcpc, false);
4436 usb_power_delivery_unregister_capabilities(port->partner_sink_caps);
4437 port->partner_sink_caps = NULL;
4438 usb_power_delivery_unregister_capabilities(port->partner_source_caps);
4439 port->partner_source_caps = NULL;
4440 usb_power_delivery_unregister(port->partner_pd);
4441 port->partner_pd = NULL;
4444 static void tcpm_detach(struct tcpm_port *port)
4446 if (tcpm_port_is_disconnected(port))
4447 port->hard_reset_count = 0;
4449 if (!port->attached)
4452 if (port->tcpc->set_bist_data) {
4453 tcpm_log(port, "disable BIST MODE TESTDATA");
4454 port->tcpc->set_bist_data(port->tcpc, false);
4457 tcpm_reset_port(port);
4460 static void tcpm_src_detach(struct tcpm_port *port)
4465 static int tcpm_snk_attach(struct tcpm_port *port)
4472 ret = tcpm_set_polarity(port, port->cc2 != TYPEC_CC_OPEN ?
4473 TYPEC_POLARITY_CC2 : TYPEC_POLARITY_CC1);
4477 tcpm_enable_auto_vbus_discharge(port, true);
4479 ret = tcpm_set_roles(port, true, TYPEC_SINK, tcpm_data_role_for_sink(port));
4483 port->pd_capable = false;
4485 port->partner = NULL;
4487 port->attached = true;
4488 port->send_discover = true;
4489 port->send_discover_prime = false;
4494 static void tcpm_snk_detach(struct tcpm_port *port)
4499 static int tcpm_acc_attach(struct tcpm_port *port)
4506 ret = tcpm_set_roles(port, true, TYPEC_SOURCE,
4507 tcpm_data_role_for_source(port));
4511 port->partner = NULL;
4513 tcpm_typec_connect(port);
4515 port->attached = true;
4520 static void tcpm_acc_detach(struct tcpm_port *port)
4525 static inline enum tcpm_state hard_reset_state(struct tcpm_port *port)
4527 if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
4528 return HARD_RESET_SEND;
4529 if (port->pd_capable)
4530 return ERROR_RECOVERY;
4531 if (port->pwr_role == TYPEC_SOURCE)
4532 return SRC_UNATTACHED;
4533 if (port->state == SNK_WAIT_CAPABILITIES ||
4534 port->state == SNK_WAIT_CAPABILITIES_TIMEOUT)
4536 return SNK_UNATTACHED;
4539 static inline enum tcpm_state unattached_state(struct tcpm_port *port)
4541 if (port->port_type == TYPEC_PORT_DRP) {
4542 if (port->pwr_role == TYPEC_SOURCE)
4543 return SRC_UNATTACHED;
4545 return SNK_UNATTACHED;
4546 } else if (port->port_type == TYPEC_PORT_SRC) {
4547 return SRC_UNATTACHED;
4550 return SNK_UNATTACHED;
4553 static void tcpm_swap_complete(struct tcpm_port *port, int result)
4555 if (port->swap_pending) {
4556 port->swap_status = result;
4557 port->swap_pending = false;
4558 port->non_pd_role_swap = false;
4559 complete(&port->swap_complete);
4563 static enum typec_pwr_opmode tcpm_get_pwr_opmode(enum typec_cc_status cc)
4566 case TYPEC_CC_RP_1_5:
4567 return TYPEC_PWR_MODE_1_5A;
4568 case TYPEC_CC_RP_3_0:
4569 return TYPEC_PWR_MODE_3_0A;
4570 case TYPEC_CC_RP_DEF:
4572 return TYPEC_PWR_MODE_USB;
4576 static enum typec_cc_status tcpm_pwr_opmode_to_rp(enum typec_pwr_opmode opmode)
4579 case TYPEC_PWR_MODE_USB:
4580 return TYPEC_CC_RP_DEF;
4581 case TYPEC_PWR_MODE_1_5A:
4582 return TYPEC_CC_RP_1_5;
4583 case TYPEC_PWR_MODE_3_0A:
4584 case TYPEC_PWR_MODE_PD:
4586 return TYPEC_CC_RP_3_0;
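/*
 * Editor's note: together these two helpers give the usual Type-C mapping --
 * Rp default advertisement <-> USB default power, Rp 1.5 A <-> 1.5 A,
 * Rp 3.0 A <-> 3.0 A, with PD-negotiated contracts advertised as Rp 3.0 A.
 */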
4590 static void tcpm_set_initial_svdm_version(struct tcpm_port *port)
4595 switch (port->negotiated_rev) {
4599 * 6.4.4.2.3 Structured VDM Version
4600 * 2.0 states "At this time, there is only one version (1.0) defined.
4601 * This field Shall be set to zero to indicate Version 1.0."
4602 * 3.0 states "This field Shall be set to 01b to indicate Version 2.0."
4603 * To ensure that we follow the Power Delivery revision we are currently
4604 * operating on, downgrade the SVDM version to the highest one supported
4605 * by the Power Delivery revision.
4608 typec_partner_set_svdm_version(port->partner, SVDM_VER_1_0);
4611 typec_partner_set_svdm_version(port->partner, SVDM_VER_1_0);
4616 static void run_state_machine(struct tcpm_port *port)
4619 enum typec_pwr_opmode opmode;
4621 enum tcpm_state upcoming_state;
4623 if (port->tcpc->check_contaminant && port->state != CHECK_CONTAMINANT)
4624 port->potential_contaminant = ((port->enter_state == SRC_ATTACH_WAIT &&
4625 port->state == SRC_UNATTACHED) ||
4626 (port->enter_state == SNK_ATTACH_WAIT &&
4627 port->state == SNK_UNATTACHED) ||
4628 (port->enter_state == SNK_DEBOUNCED &&
4629 port->state == SNK_UNATTACHED));
4631 port->enter_state = port->state;
4632 switch (port->state) {
4635 case CHECK_CONTAMINANT:
4636 port->tcpc->check_contaminant(port->tcpc);
4639 case SRC_UNATTACHED:
4640 if (!port->non_pd_role_swap)
4641 tcpm_swap_complete(port, -ENOTCONN);
4642 tcpm_src_detach(port);
4643 if (port->potential_contaminant) {
4644 tcpm_set_state(port, CHECK_CONTAMINANT, 0);
4647 if (tcpm_start_toggling(port, tcpm_rp_cc(port))) {
4648 tcpm_set_state(port, TOGGLING, 0);
4651 tcpm_set_cc(port, tcpm_rp_cc(port));
4652 if (port->port_type == TYPEC_PORT_DRP)
4653 tcpm_set_state(port, SNK_UNATTACHED, PD_T_DRP_SNK);
4655 case SRC_ATTACH_WAIT:
4656 if (tcpm_port_is_debug(port))
4657 tcpm_set_state(port, DEBUG_ACC_ATTACHED,
4658 port->timings.cc_debounce_time);
4659 else if (tcpm_port_is_audio(port))
4660 tcpm_set_state(port, AUDIO_ACC_ATTACHED,
4661 port->timings.cc_debounce_time);
4662 else if (tcpm_port_is_source(port) && port->vbus_vsafe0v)
4663 tcpm_set_state(port,
4664 tcpm_try_snk(port) ? SNK_TRY
4666 port->timings.cc_debounce_time);
4670 port->try_snk_count++;
4673 * - Do not drive vconn or vbus
4674 * - Terminate CC pins (both) to Rd
4676 * - Wait for tDRPTry (PD_T_DRP_TRY).
4677 * Until then, ignore any state changes.
4679 tcpm_set_cc(port, TYPEC_CC_RD);
4680 tcpm_set_state(port, SNK_TRY_WAIT, PD_T_DRP_TRY);
4683 if (tcpm_port_is_sink(port)) {
4684 tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE, 0);
4686 tcpm_set_state(port, SRC_TRYWAIT, 0);
4690 case SNK_TRY_WAIT_DEBOUNCE:
4691 tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS,
4692 PD_T_TRY_CC_DEBOUNCE);
4694 case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
4695 if (port->vbus_present && tcpm_port_is_sink(port))
4696 tcpm_set_state(port, SNK_ATTACHED, 0);
4701 tcpm_set_cc(port, tcpm_rp_cc(port));
4702 if (port->max_wait == 0) {
4703 port->max_wait = jiffies +
4704 msecs_to_jiffies(PD_T_DRP_TRY);
4705 tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
4708 if (time_is_after_jiffies(port->max_wait))
4709 tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
4710 jiffies_to_msecs(port->max_wait -
4713 tcpm_set_state(port, SNK_UNATTACHED, 0);
4716 case SRC_TRYWAIT_DEBOUNCE:
4717 tcpm_set_state(port, SRC_ATTACHED, port->timings.cc_debounce_time);
4719 case SRC_TRYWAIT_UNATTACHED:
4720 tcpm_set_state(port, SNK_UNATTACHED, 0);
4724 ret = tcpm_src_attach(port);
4725 tcpm_set_state(port, SRC_UNATTACHED,
4726 ret < 0 ? 0 : PD_T_PS_SOURCE_ON);
4729 opmode = tcpm_get_pwr_opmode(tcpm_rp_cc(port));
4730 typec_set_pwr_opmode(port->typec_port, opmode);
4731 port->pwr_opmode = TYPEC_PWR_MODE_USB;
4732 port->caps_count = 0;
4733 port->negotiated_rev = PD_MAX_REV;
4734 port->negotiated_rev_prime = PD_MAX_REV;
4735 port->message_id = 0;
4736 port->message_id_prime = 0;
4737 port->rx_msgid = -1;
4738 port->rx_msgid_prime = -1;
4739 port->explicit_contract = false;
4740 /* SNK -> SRC POWER/FAST_ROLE_SWAP finished */
4741 if (port->ams == POWER_ROLE_SWAP ||
4742 port->ams == FAST_ROLE_SWAP)
4743 tcpm_ams_finish(port);
4744 if (!port->pd_supported) {
4745 tcpm_set_state(port, SRC_READY, 0);
4748 port->upcoming_state = SRC_SEND_CAPABILITIES;
4749 tcpm_ams_start(port, POWER_NEGOTIATION);
4751 case SRC_SEND_CAPABILITIES:
4753 if (port->caps_count > PD_N_CAPS_COUNT) {
4754 tcpm_set_state(port, SRC_READY, 0);
4757 ret = tcpm_pd_send_source_caps(port);
4759 if (tcpm_can_communicate_sop_prime(port) &&
4760 IS_ERR_OR_NULL(port->cable))
4761 tcpm_set_state(port, SRC_VDM_IDENTITY_REQUEST, 0);
4763 tcpm_set_state(port, SRC_SEND_CAPABILITIES,
4764 PD_T_SEND_SOURCE_CAP);
4767 * Per standard, we should clear the reset counter here.
4768 * However, that can result in state machine hang-ups.
4769 * Reset it only in READY state to improve stability.
4771 /* port->hard_reset_count = 0; */
4772 port->caps_count = 0;
4773 port->pd_capable = true;
4774 tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
4775 PD_T_SEND_SOURCE_CAP);
4778 case SRC_SEND_CAPABILITIES_TIMEOUT:
4780 * Error recovery for a PD_DATA_SOURCE_CAP reply timeout.
4782 * PD 2.0 sinks are supposed to accept src-capabilities with a
4783 * 3.0 header and simply ignore any src PDOs which the sink does
4784 * not understand, such as PPS, but some 2.0 sinks instead ignore
4785 * the entire PD_DATA_SOURCE_CAP message, causing contract
4786 * negotiation to fail.
4788 * After PD_N_HARD_RESET_COUNT hard-reset attempts, we try
4789 * sending src-capabilities with a lower PD revision to
4790 * make these broken sinks work.
4792 if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) {
4793 tcpm_set_state(port, HARD_RESET_SEND, 0);
4794 } else if (port->negotiated_rev > PD_REV20) {
4795 port->negotiated_rev--;
4796 port->hard_reset_count = 0;
4797 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
4799 tcpm_set_state(port, hard_reset_state(port), 0);
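/*
 * Note: negotiated_rev is only stepped down once the hard-reset budget has
 * been spent, and the budget is refilled for the lower revision, so a single
 * missed reply does not immediately pin the port to PD 2.0.
 */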
4802 case SRC_NEGOTIATE_CAPABILITIES:
4803 ret = tcpm_pd_check_request(port);
4805 tcpm_pd_send_control(port, PD_CTRL_REJECT, TCPC_TX_SOP);
4806 if (!port->explicit_contract) {
4807 tcpm_set_state(port,
4808 SRC_WAIT_NEW_CAPABILITIES, 0);
4810 tcpm_set_state(port, SRC_READY, 0);
4813 tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
4814 tcpm_set_partner_usb_comm_capable(port,
4815 !!(port->sink_request & RDO_USB_COMM));
4816 tcpm_set_state(port, SRC_TRANSITION_SUPPLY,
4817 PD_T_SRC_TRANSITION);
4820 case SRC_TRANSITION_SUPPLY:
4821 /* XXX: regulator_set_voltage(vbus, ...) */
4822 tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP);
4823 port->explicit_contract = true;
4824 typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_PD);
4825 port->pwr_opmode = TYPEC_PWR_MODE_PD;
4826 tcpm_set_state_cond(port, SRC_READY, 0);
4830 port->hard_reset_count = 0;
4832 port->try_src_count = 0;
4834 tcpm_swap_complete(port, 0);
4835 tcpm_typec_connect(port);
4837 if (port->ams != NONE_AMS)
4838 tcpm_ams_finish(port);
4839 if (port->next_ams != NONE_AMS) {
4840 port->ams = port->next_ams;
4841 port->next_ams = NONE_AMS;
4845 * If the previous AMS was interrupted, switch to the upcoming state.
4848 if (port->upcoming_state != INVALID_STATE) {
4849 upcoming_state = port->upcoming_state;
4850 port->upcoming_state = INVALID_STATE;
4851 tcpm_set_state(port, upcoming_state, 0);
4856 * 6.4.4.3.1 Discover Identity
4857 * "The Discover Identity Command Shall only be sent to SOP when there is an
4858 * Explicit Contract."
4860 * Discover Identity on SOP' should be discovered prior to the
4861 * ready state, but if done after a Vconn Swap following Discover
4862 * Identity on SOP then the discovery process can be run here
4865 if (port->explicit_contract) {
4866 if (port->send_discover_prime) {
4867 port->tx_sop_type = TCPC_TX_SOP_PRIME;
4869 port->tx_sop_type = TCPC_TX_SOP;
4870 tcpm_set_initial_svdm_version(port);
4872 mod_send_discover_delayed_work(port, 0);
4874 port->send_discover = false;
4875 port->send_discover_prime = false;
4880 * Sending ping messages is not necessary if
4881 * - the source operates at vSafe5V
4883 * - The system is not operating in PD mode
4885 * - Both partners are connected using a Type-C connector
4887 * There is no actual need to send PD messages since the local
4888 * port is Type-C, and the spec does not clearly say whether PD is
4889 * possible when Type-C is connected to Type-A/B.
4892 case SRC_WAIT_NEW_CAPABILITIES:
4893 /* Nothing to do... */
4897 case SNK_UNATTACHED:
4898 if (!port->non_pd_role_swap)
4899 tcpm_swap_complete(port, -ENOTCONN);
4900 tcpm_pps_complete(port, -ENOTCONN);
4901 tcpm_snk_detach(port);
4902 if (port->potential_contaminant) {
4903 tcpm_set_state(port, CHECK_CONTAMINANT, 0);
4906 if (tcpm_start_toggling(port, TYPEC_CC_RD)) {
4907 tcpm_set_state(port, TOGGLING, 0);
4910 tcpm_set_cc(port, TYPEC_CC_RD);
4911 if (port->port_type == TYPEC_PORT_DRP)
4912 tcpm_set_state(port, SRC_UNATTACHED, PD_T_DRP_SRC);
4914 case SNK_ATTACH_WAIT:
4915 if ((port->cc1 == TYPEC_CC_OPEN &&
4916 port->cc2 != TYPEC_CC_OPEN) ||
4917 (port->cc1 != TYPEC_CC_OPEN &&
4918 port->cc2 == TYPEC_CC_OPEN))
4919 tcpm_set_state(port, SNK_DEBOUNCED,
4920 port->timings.cc_debounce_time);
4921 else if (tcpm_port_is_disconnected(port))
4922 tcpm_set_state(port, SNK_UNATTACHED,
4926 if (tcpm_port_is_disconnected(port))
4927 tcpm_set_state(port, SNK_UNATTACHED,
4929 else if (port->vbus_present)
4930 tcpm_set_state(port,
4931 tcpm_try_src(port) ? SRC_TRY
4936 port->try_src_count++;
4937 tcpm_set_cc(port, tcpm_rp_cc(port));
4939 tcpm_set_state(port, SRC_TRY_WAIT, 0);
4942 if (port->max_wait == 0) {
4943 port->max_wait = jiffies +
4944 msecs_to_jiffies(PD_T_DRP_TRY);
4945 msecs = PD_T_DRP_TRY;
4947 if (time_is_after_jiffies(port->max_wait))
4948 msecs = jiffies_to_msecs(port->max_wait -
4953 tcpm_set_state(port, SNK_TRYWAIT, msecs);
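/*
 * Note: max_wait pins the end of the tDRPTry window across re-entries into
 * SRC_TRY_WAIT, so once the jiffies deadline has passed the port moves on to
 * SNK_TRYWAIT without further delay.
 */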
4955 case SRC_TRY_DEBOUNCE:
4956 tcpm_set_state(port, SRC_ATTACHED, PD_T_PD_DEBOUNCE);
4959 tcpm_set_cc(port, TYPEC_CC_RD);
4960 tcpm_set_state(port, SNK_TRYWAIT_VBUS, port->timings.cc_debounce_time);
4962 case SNK_TRYWAIT_VBUS:
4964 * TCPM stays in this state indefinitely, waiting for VBUS to be
4965 * detected, as long as Rp does not remain undetected for more
4966 * than tPDDebounce.
4968 if (port->vbus_present && tcpm_port_is_sink(port)) {
4969 tcpm_set_state(port, SNK_ATTACHED, 0);
4972 if (!tcpm_port_is_sink(port))
4973 tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
4975 case SNK_TRYWAIT_DEBOUNCE:
4976 tcpm_set_state(port, SNK_UNATTACHED, PD_T_PD_DEBOUNCE);
4979 ret = tcpm_snk_attach(port);
4981 tcpm_set_state(port, SNK_UNATTACHED, 0);
4984 * For Type C port controllers that use Battery Charging
4985 * Detection (based on BCv1.2 spec) to detect USB
4986 * charger type, add a delay of "snk_bc12_cmpletion_time"
4987 * before transitioning to SNK_STARTUP to allow BC1.2
4988 * detection to complete before PD is eventually enabled
4991 tcpm_set_state(port, SNK_STARTUP,
4992 port->timings.snk_bc12_cmpletion_time);
4995 opmode = tcpm_get_pwr_opmode(port->polarity ?
4996 port->cc2 : port->cc1);
4997 typec_set_pwr_opmode(port->typec_port, opmode);
4998 port->pwr_opmode = TYPEC_PWR_MODE_USB;
4999 port->negotiated_rev = PD_MAX_REV;
5000 port->negotiated_rev_prime = PD_MAX_REV;
5001 port->message_id = 0;
5002 port->message_id_prime = 0;
5003 port->rx_msgid = -1;
5004 port->rx_msgid_prime = -1;
5005 port->explicit_contract = false;
5007 if (port->ams == POWER_ROLE_SWAP ||
5008 port->ams == FAST_ROLE_SWAP)
5009 /* SRC -> SNK POWER/FAST_ROLE_SWAP finished */
5010 tcpm_ams_finish(port);
5012 tcpm_set_state(port, SNK_DISCOVERY, 0);
5015 if (port->vbus_present) {
5016 u32 current_lim = tcpm_get_current_limit(port);
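/*
 * With a slow external charger loop, cap the initial draw to the pSnkStdby
 * budget expressed as mA at vSafe5V, i.e. PD_P_SNK_STDBY_MW / 5 V.
 */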
5018 if (port->slow_charger_loop && (current_lim > PD_P_SNK_STDBY_MW / 5))
5019 current_lim = PD_P_SNK_STDBY_MW / 5;
5020 tcpm_set_current_limit(port, current_lim, 5000);
5021 /* Do not sink vbus if the operational current is 0 mA */
5022 tcpm_set_charge(port, !port->pd_supported ||
5023 pdo_max_current(port->snk_pdo[0]));
5025 if (!port->pd_supported)
5026 tcpm_set_state(port, SNK_READY, 0);
5028 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
5032 * For DRP, timeouts differ. Also, handling is supposed to be
5033 * different and much more complex (dead battery detection;
5034 * see USB power delivery specification, section 8.3.3.6.1.5.1).
5036 tcpm_set_state(port, hard_reset_state(port),
5037 port->port_type == TYPEC_PORT_DRP ?
5038 PD_T_DB_DETECT : PD_T_NO_RESPONSE);
5040 case SNK_DISCOVERY_DEBOUNCE:
5041 tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE_DONE,
5042 port->timings.cc_debounce_time);
5044 case SNK_DISCOVERY_DEBOUNCE_DONE:
5045 if (!tcpm_port_is_disconnected(port) &&
5046 tcpm_port_is_sink(port) &&
5047 ktime_after(port->delayed_runtime, ktime_get())) {
5048 tcpm_set_state(port, SNK_DISCOVERY,
5049 ktime_to_ms(ktime_sub(port->delayed_runtime, ktime_get())));
5052 tcpm_set_state(port, unattached_state(port), 0);
5054 case SNK_WAIT_CAPABILITIES:
5055 ret = port->tcpc->set_pd_rx(port->tcpc, true);
5057 tcpm_set_state(port, SNK_READY, 0);
5061 * If VBUS has never been low, and we time out waiting
5062 * for source cap, try a soft reset first, in case we
5063 * were already in a stable contract before this boot.
5064 * Do this only once.
5066 if (port->vbus_never_low) {
5067 port->vbus_never_low = false;
5068 tcpm_set_state(port, SNK_SOFT_RESET,
5069 port->timings.sink_wait_cap_time);
5071 if (!port->self_powered)
5072 upcoming_state = SNK_WAIT_CAPABILITIES_TIMEOUT;
5074 upcoming_state = hard_reset_state(port);
5075 tcpm_set_state(port, SNK_WAIT_CAPABILITIES_TIMEOUT,
5076 port->timings.sink_wait_cap_time);
5079 case SNK_WAIT_CAPABILITIES_TIMEOUT:
5081 * There are some USB PD sources in the field that do not
5082 * properly implement the specification and fail to start
5083 * sending Source Capability messages after a soft reset. The
5084 * specification suggests doing a hard reset when no Source
5085 * capability message is received within PD_T_SINK_WAIT_CAP,
5086 * but that might effectively kill the machine's power source.
5088 * This slightly diverges from the specification and tries to
5089 * recover from this by explicitly asking for the capabilities
5090 * using the Get_Source_Cap control message before falling back
5091 * to a hard reset. The control message should also be supported
5092 * and handled by all USB PD source and dual role devices
5093 * according to the specification.
5095 if (tcpm_pd_send_control(port, PD_CTRL_GET_SOURCE_CAP, TCPC_TX_SOP))
5096 tcpm_set_state_cond(port, hard_reset_state(port), 0);
5098 tcpm_set_state(port, hard_reset_state(port),
5099 port->timings.sink_wait_cap_time);
5101 case SNK_NEGOTIATE_CAPABILITIES:
5102 port->pd_capable = true;
5103 tcpm_set_partner_usb_comm_capable(port,
5104 !!(port->source_caps[0] & PDO_FIXED_USB_COMM));
5105 port->hard_reset_count = 0;
5106 ret = tcpm_pd_send_request(port);
5108 /* Restore back to the original state */
5109 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
5110 port->pps_data.active,
5111 port->supply_voltage);
5112 /* Let the Source send capabilities again. */
5113 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
5115 tcpm_set_state_cond(port, hard_reset_state(port),
5116 PD_T_SENDER_RESPONSE);
5119 case SNK_NEGOTIATE_PPS_CAPABILITIES:
5120 ret = tcpm_pd_send_pps_request(port);
5122 /* Restore back to the original state */
5123 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_PD,
5124 port->pps_data.active,
5125 port->supply_voltage);
5126 port->pps_status = ret;
5128 * If this was called due to updates to sink
5129 * capabilities, and pps is no longer valid, we should
5130 * safely fall back to a standard PDO.
5132 if (port->update_sink_caps)
5133 tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
5135 tcpm_set_state(port, SNK_READY, 0);
5137 tcpm_set_state_cond(port, hard_reset_state(port),
5138 PD_T_SENDER_RESPONSE);
5141 case SNK_TRANSITION_SINK:
5142 /* From the USB PD spec:
5143 * "The Sink Shall transition to Sink Standby before a positive or
5144 * negative voltage transition of VBUS. During Sink Standby
5145 * the Sink Shall reduce its power draw to pSnkStdby."
5147 * This is not applicable to PPS though as the port can continue
5148 * to draw negotiated power without switching to standby.
5150 if (port->supply_voltage != port->req_supply_voltage && !port->pps_data.active &&
5151 port->current_limit * port->supply_voltage / 1000 > PD_P_SNK_STDBY_MW) {
5152 u32 stdby_ma = PD_P_SNK_STDBY_MW * 1000 / port->supply_voltage;
5154 tcpm_log(port, "Setting standby current limit %u mA at %u mV",
5155 stdby_ma, port->supply_voltage);
5156 tcpm_set_current_limit(port, stdby_ma, port->supply_voltage);
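/*
 * Worked example of the clamp above, assuming PD_P_SNK_STDBY_MW carries the
 * spec's 2.5 W pSnkStdby value: a sink sitting at 5000 mV is cut back to
 * 2500 * 1000 / 5000 = 500 mA for the duration of the VBUS voltage transition.
 */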
5159 case SNK_TRANSITION_SINK_VBUS:
5160 tcpm_set_state(port, hard_reset_state(port),
5161 PD_T_PS_TRANSITION);
5164 port->try_snk_count = 0;
5165 port->update_sink_caps = false;
5166 if (port->explicit_contract) {
5167 typec_set_pwr_opmode(port->typec_port,
5169 port->pwr_opmode = TYPEC_PWR_MODE_PD;
5172 if (!port->pd_capable && port->slow_charger_loop)
5173 tcpm_set_current_limit(port, tcpm_get_current_limit(port), 5000);
5174 tcpm_swap_complete(port, 0);
5175 tcpm_typec_connect(port);
5176 if (port->pd_capable && port->source_caps[0] & PDO_FIXED_DUAL_ROLE)
5177 mod_enable_frs_delayed_work(port, 0);
5178 tcpm_pps_complete(port, port->pps_status);
5180 if (port->ams != NONE_AMS)
5181 tcpm_ams_finish(port);
5182 if (port->next_ams != NONE_AMS) {
5183 port->ams = port->next_ams;
5184 port->next_ams = NONE_AMS;
5188 * If the previous AMS was interrupted, switch to the upcoming state.
5191 if (port->upcoming_state != INVALID_STATE) {
5192 upcoming_state = port->upcoming_state;
5193 port->upcoming_state = INVALID_STATE;
5194 tcpm_set_state(port, upcoming_state, 0);
5199 * 6.4.4.3.1 Discover Identity
5200 * "The Discover Identity Command Shall only be sent to SOP when there is an
5201 * Explicit Contract."
5203 * Discover Identity on SOP' should be discovered prior to the
5204 * ready state, but if done after a Vconn Swap following Discover
5205 * Identity on SOP then the discovery process can be run here
5208 if (port->explicit_contract) {
5209 if (port->send_discover_prime) {
5210 port->tx_sop_type = TCPC_TX_SOP_PRIME;
5212 port->tx_sop_type = TCPC_TX_SOP;
5213 tcpm_set_initial_svdm_version(port);
5215 mod_send_discover_delayed_work(port, 0);
5217 port->send_discover = false;
5218 port->send_discover_prime = false;
5221 power_supply_changed(port->psy);
5224 /* Accessory states */
5225 case ACC_UNATTACHED:
5226 tcpm_acc_detach(port);
5227 tcpm_set_state(port, SRC_UNATTACHED, 0);
5229 case DEBUG_ACC_ATTACHED:
5230 case AUDIO_ACC_ATTACHED:
5231 ret = tcpm_acc_attach(port);
5233 tcpm_set_state(port, ACC_UNATTACHED, 0);
5235 case AUDIO_ACC_DEBOUNCE:
5236 tcpm_set_state(port, ACC_UNATTACHED, port->timings.cc_debounce_time);
5239 /* Hard_Reset states */
5240 case HARD_RESET_SEND:
5241 if (port->ams != NONE_AMS)
5242 tcpm_ams_finish(port);
5243 if (!port->self_powered && port->port_type == TYPEC_PORT_SNK)
5244 dev_err(port->dev, "Initiating hard-reset, which might result in machine power-loss.\n");
5246 * State machine will be directed to HARD_RESET_START,
5247 * thus set upcoming_state to INVALID_STATE.
5249 port->upcoming_state = INVALID_STATE;
5250 tcpm_ams_start(port, HARD_RESET);
5252 case HARD_RESET_START:
5253 port->sink_cap_done = false;
5254 if (port->tcpc->enable_frs)
5255 port->tcpc->enable_frs(port->tcpc, false);
5256 port->hard_reset_count++;
5257 port->tcpc->set_pd_rx(port->tcpc, false);
5258 tcpm_unregister_altmodes(port);
5259 port->nr_sink_caps = 0;
5260 port->send_discover = true;
5261 port->send_discover_prime = false;
5262 if (port->pwr_role == TYPEC_SOURCE)
5263 tcpm_set_state(port, SRC_HARD_RESET_VBUS_OFF,
5264 PD_T_PS_HARD_RESET);
5266 tcpm_set_state(port, SNK_HARD_RESET_SINK_OFF, 0);
5268 case SRC_HARD_RESET_VBUS_OFF:
5270 * 7.1.5 Response to Hard Resets
5271 * Hard Reset Signaling indicates a communication failure has occurred and the
5272 * Source Shall stop driving VCONN, Shall remove Rp from the VCONN pin and Shall
5273 * drive VBUS to vSafe0V as shown in Figure 7-9.
5275 tcpm_set_vconn(port, false);
5276 tcpm_set_vbus(port, false);
5277 tcpm_set_roles(port, port->self_powered, TYPEC_SOURCE,
5278 tcpm_data_role_for_source(port));
5280 * If tcpc fails to notify vbus off, TCPM will wait for PD_T_SAFE_0V +
5281 * PD_T_SRC_RECOVER before turning vbus back on.
5282 * From Table 7-12 Sequence Description for a Source Initiated Hard Reset:
5283 * 4. Policy Engine waits tPSHardReset after sending Hard Reset Signaling and then
5284 * tells the Device Policy Manager to instruct the power supply to perform a
5285 * Hard Reset. The transition to vSafe0V Shall occur within tSafe0V (t2).
5286 * 5. After tSrcRecover the Source applies power to VBUS in an attempt to
5287 * re-establish communication with the Sink and resume USB Default Operation.
5288 * The transition to vSafe5V Shall occur within tSrcTurnOn(t4).
5290 tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SAFE_0V + PD_T_SRC_RECOVER);
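/*
 * Note: the PD_T_SAFE_0V + PD_T_SRC_RECOVER delay above is only the fallback
 * path; if the TCPC does report vSafe0V, _tcpm_pd_vbus_vsafe0v() re-arms the
 * transition to SRC_HARD_RESET_VBUS_ON after just PD_T_SRC_RECOVER.
 */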
5292 case SRC_HARD_RESET_VBUS_ON:
5293 tcpm_set_vconn(port, true);
5294 tcpm_set_vbus(port, true);
5295 if (port->ams == HARD_RESET)
5296 tcpm_ams_finish(port);
5297 if (port->pd_supported)
5298 port->tcpc->set_pd_rx(port->tcpc, true);
5299 tcpm_set_attached_state(port, true);
5300 tcpm_set_state(port, SRC_UNATTACHED, PD_T_PS_SOURCE_ON);
5302 case SNK_HARD_RESET_SINK_OFF:
5303 /* Do not discharge/disconnect during hard reset */
5304 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
5305 memset(&port->pps_data, 0, sizeof(port->pps_data));
5306 tcpm_set_vconn(port, false);
5307 if (port->pd_capable)
5308 tcpm_set_charge(port, false);
5309 tcpm_set_roles(port, port->self_powered, TYPEC_SINK,
5310 tcpm_data_role_for_sink(port));
5312 * VBUS may or may not toggle, depending on the adapter.
5313 * If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON
5314 * directly after timeout.
5316 tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, PD_T_SAFE_0V);
5318 case SNK_HARD_RESET_WAIT_VBUS:
5319 if (port->ams == HARD_RESET)
5320 tcpm_ams_finish(port);
5321 /* Assume we're disconnected if VBUS doesn't come back. */
5322 tcpm_set_state(port, SNK_UNATTACHED,
5323 PD_T_SRC_RECOVER_MAX + PD_T_SRC_TURN_ON);
5325 case SNK_HARD_RESET_SINK_ON:
5326 /* Note: There is no guarantee that VBUS is on in this state */
5329 * The specification suggests that dual mode ports in sink
5330 * mode should transition to state PE_SRC_Transition_to_default.
5331 * See USB power delivery specification chapter 8.3.3.6.1.3.
5332 * This would mean to
5333 * - turn off VCONN, reset power supply
5334 * - request hardware reset
5336 * - Transition to state PE_Src_Startup
5337 * SNK only ports shall transition to state Snk_Startup
5338 * (see chapter 8.3.3.3.8).
5339 * Similarly, dual-mode ports in source mode should transition
5340 * to PE_SNK_Transition_to_default.
5342 if (port->pd_capable) {
5343 tcpm_set_current_limit(port,
5344 tcpm_get_current_limit(port),
5346 /* Do not sink vbus if the operational current is 0 mA */
5347 tcpm_set_charge(port, !!pdo_max_current(port->snk_pdo[0]));
5349 if (port->ams == HARD_RESET)
5350 tcpm_ams_finish(port);
5351 tcpm_set_attached_state(port, true);
5352 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
5353 tcpm_set_state(port, SNK_STARTUP, 0);
5356 /* Soft_Reset states */
5358 port->message_id = 0;
5359 port->rx_msgid = -1;
5360 /* remove existing capabilities */
5361 usb_power_delivery_unregister_capabilities(port->partner_source_caps);
5362 port->partner_source_caps = NULL;
5363 tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
5364 tcpm_ams_finish(port);
5365 if (port->pwr_role == TYPEC_SOURCE) {
5366 port->upcoming_state = SRC_SEND_CAPABILITIES;
5367 tcpm_ams_start(port, POWER_NEGOTIATION);
5369 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
5372 case SRC_SOFT_RESET_WAIT_SNK_TX:
5373 case SNK_SOFT_RESET:
5374 if (port->ams != NONE_AMS)
5375 tcpm_ams_finish(port);
5376 port->upcoming_state = SOFT_RESET_SEND;
5377 tcpm_ams_start(port, SOFT_RESET_AMS);
5379 case SOFT_RESET_SEND:
5381 * Power Delivery 3.0 Section 6.3.13
5383 * A Soft_Reset Message Shall be targeted at a specific entity
5384 * depending on the type of SOP* packet used.
5386 if (port->tx_sop_type == TCPC_TX_SOP_PRIME) {
5387 port->message_id_prime = 0;
5388 port->rx_msgid_prime = -1;
5389 tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET, TCPC_TX_SOP_PRIME);
5390 tcpm_set_state_cond(port, ready_state(port), PD_T_SENDER_RESPONSE);
5392 port->message_id = 0;
5393 port->rx_msgid = -1;
5394 /* remove existing capabilities */
5395 usb_power_delivery_unregister_capabilities(port->partner_source_caps);
5396 port->partner_source_caps = NULL;
5397 if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET, TCPC_TX_SOP))
5398 tcpm_set_state_cond(port, hard_reset_state(port), 0);
5400 tcpm_set_state_cond(port, hard_reset_state(port),
5401 PD_T_SENDER_RESPONSE);
5405 /* DR_Swap states */
5407 tcpm_pd_send_control(port, PD_CTRL_DR_SWAP, TCPC_TX_SOP);
5408 if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20) {
5409 port->send_discover = true;
5410 port->send_discover_prime = false;
5412 tcpm_set_state_cond(port, DR_SWAP_SEND_TIMEOUT,
5413 PD_T_SENDER_RESPONSE);
5415 case DR_SWAP_ACCEPT:
5416 tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
5417 if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20) {
5418 port->send_discover = true;
5419 port->send_discover_prime = false;
5421 tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0);
5423 case DR_SWAP_SEND_TIMEOUT:
5424 tcpm_swap_complete(port, -ETIMEDOUT);
5425 port->send_discover = false;
5426 port->send_discover_prime = false;
5427 tcpm_ams_finish(port);
5428 tcpm_set_state(port, ready_state(port), 0);
5430 case DR_SWAP_CHANGE_DR:
5431 tcpm_unregister_altmodes(port);
5432 if (port->data_role == TYPEC_HOST)
5433 tcpm_set_roles(port, true, port->pwr_role,
5436 tcpm_set_roles(port, true, port->pwr_role,
5438 tcpm_ams_finish(port);
5439 tcpm_set_state(port, ready_state(port), 0);
5443 if (tcpm_pd_send_control(port, PD_CTRL_FR_SWAP, TCPC_TX_SOP)) {
5444 tcpm_set_state(port, ERROR_RECOVERY, 0);
5447 tcpm_set_state_cond(port, FR_SWAP_SEND_TIMEOUT, PD_T_SENDER_RESPONSE);
5449 case FR_SWAP_SEND_TIMEOUT:
5450 tcpm_set_state(port, ERROR_RECOVERY, 0);
5452 case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
5453 tcpm_set_state(port, ERROR_RECOVERY, port->timings.ps_src_off_time);
5455 case FR_SWAP_SNK_SRC_NEW_SINK_READY:
5456 if (port->vbus_source)
5457 tcpm_set_state(port, FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, 0);
5459 tcpm_set_state(port, ERROR_RECOVERY, PD_T_RECEIVER_RESPONSE);
5461 case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
5462 tcpm_set_pwr_role(port, TYPEC_SOURCE);
5463 if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP)) {
5464 tcpm_set_state(port, ERROR_RECOVERY, 0);
5467 tcpm_set_cc(port, tcpm_rp_cc(port));
5468 tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START);
5471 /* PR_Swap states */
5472 case PR_SWAP_ACCEPT:
5473 tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
5474 tcpm_set_state(port, PR_SWAP_START, 0);
5477 tcpm_pd_send_control(port, PD_CTRL_PR_SWAP, TCPC_TX_SOP);
5478 tcpm_set_state_cond(port, PR_SWAP_SEND_TIMEOUT,
5479 PD_T_SENDER_RESPONSE);
5481 case PR_SWAP_SEND_TIMEOUT:
5482 tcpm_swap_complete(port, -ETIMEDOUT);
5483 tcpm_set_state(port, ready_state(port), 0);
5486 tcpm_apply_rc(port);
5487 if (port->pwr_role == TYPEC_SOURCE)
5488 tcpm_set_state(port, PR_SWAP_SRC_SNK_TRANSITION_OFF,
5489 PD_T_SRC_TRANSITION);
5491 tcpm_set_state(port, PR_SWAP_SNK_SRC_SINK_OFF, 0);
5493 case PR_SWAP_SRC_SNK_TRANSITION_OFF:
5495 * Prevent vbus discharge circuit from turning on during PR_SWAP
5496 * as this is not a disconnect.
5498 tcpm_set_vbus(port, false);
5499 port->explicit_contract = false;
5500 /* allow time for Vbus discharge, must be < tSrcSwapStdby */
5501 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF,
5504 case PR_SWAP_SRC_SNK_SOURCE_OFF:
5505 tcpm_set_cc(port, TYPEC_CC_RD);
5506 /* allow CC debounce */
5507 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED,
5508 port->timings.cc_debounce_time);
5510 case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
5512 * USB-PD standard, 6.2.1.4, Port Power Role:
5513 * "During the Power Role Swap Sequence, for the initial Source
5514 * Port, the Port Power Role field shall be set to Sink in the
5515 * PS_RDY Message indicating that the initial Source’s power
5516 * supply is turned off"
5518 tcpm_set_pwr_role(port, TYPEC_SINK);
5519 if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP)) {
5520 tcpm_set_state(port, ERROR_RECOVERY, 0);
5523 tcpm_set_state(port, ERROR_RECOVERY, PD_T_PS_SOURCE_ON_PRS);
5525 case PR_SWAP_SRC_SNK_SINK_ON:
5526 tcpm_enable_auto_vbus_discharge(port, true);
5527 /* Set the vbus disconnect threshold for implicit contract */
5528 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
5529 tcpm_set_state(port, SNK_STARTUP, 0);
5531 case PR_SWAP_SNK_SRC_SINK_OFF:
5532 /* will be source, remove existing capabilities */
5533 usb_power_delivery_unregister_capabilities(port->partner_source_caps);
5534 port->partner_source_caps = NULL;
5536 * Prevent vbus discharge circuit from turning on during PR_SWAP
5537 * as this is not a disconnect.
5539 tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB,
5540 port->pps_data.active, 0);
5541 tcpm_set_charge(port, false);
5542 tcpm_set_state(port, hard_reset_state(port),
5543 port->timings.ps_src_off_time);
5545 case PR_SWAP_SNK_SRC_SOURCE_ON:
5546 tcpm_enable_auto_vbus_discharge(port, true);
5547 tcpm_set_cc(port, tcpm_rp_cc(port));
5548 tcpm_set_vbus(port, true);
5550 * Allow time for VBUS ramp-up; must be < tNewSrc.
5551 * This window also overlaps with CC debounce,
5552 * so wait for the max of the two, which is PD_T_NEWSRC.
5554 tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP,
5557 case PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP:
5559 * USB PD standard, 6.2.1.4:
5560 * "Subsequent Messages initiated by the Policy Engine,
5561 * such as the PS_RDY Message sent to indicate that Vbus
5562 * is ready, will have the Port Power Role field set to Source."
5565 tcpm_set_pwr_role(port, TYPEC_SOURCE);
5566 tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP);
5567 tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START);
5570 case VCONN_SWAP_ACCEPT:
5571 tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
5572 tcpm_ams_finish(port);
5573 tcpm_set_state(port, VCONN_SWAP_START, 0);
5575 case VCONN_SWAP_SEND:
5576 tcpm_pd_send_control(port, PD_CTRL_VCONN_SWAP, TCPC_TX_SOP);
5577 tcpm_set_state(port, VCONN_SWAP_SEND_TIMEOUT,
5578 PD_T_SENDER_RESPONSE);
5580 case VCONN_SWAP_SEND_TIMEOUT:
5581 tcpm_swap_complete(port, -ETIMEDOUT);
5582 tcpm_set_state(port, ready_state(port), 0);
5584 case VCONN_SWAP_START:
5585 if (port->vconn_role == TYPEC_SOURCE)
5586 tcpm_set_state(port, VCONN_SWAP_WAIT_FOR_VCONN, 0);
5588 tcpm_set_state(port, VCONN_SWAP_TURN_ON_VCONN, 0);
5590 case VCONN_SWAP_WAIT_FOR_VCONN:
5591 tcpm_set_state(port, hard_reset_state(port),
5592 PD_T_VCONN_SOURCE_ON);
5594 case VCONN_SWAP_TURN_ON_VCONN:
5595 ret = tcpm_set_vconn(port, true);
5596 tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP);
5598 * USB PD 3.0 Section 6.4.4.3.1
5600 * Note that a Cable Plug or VPD will not be ready for PD
5601 * Communication until tVCONNStable after VCONN has been applied
5604 tcpm_set_state(port, VCONN_SWAP_SEND_SOFT_RESET,
5607 tcpm_set_state(port, ready_state(port), 0);
5609 case VCONN_SWAP_TURN_OFF_VCONN:
5610 tcpm_set_vconn(port, false);
5611 tcpm_set_state(port, ready_state(port), 0);
5613 case VCONN_SWAP_SEND_SOFT_RESET:
5614 tcpm_swap_complete(port, port->swap_status);
5615 if (tcpm_can_communicate_sop_prime(port)) {
5616 port->tx_sop_type = TCPC_TX_SOP_PRIME;
5617 port->upcoming_state = SOFT_RESET_SEND;
5618 tcpm_ams_start(port, SOFT_RESET_AMS);
5620 tcpm_set_state(port, ready_state(port), 0);
5624 case DR_SWAP_CANCEL:
5625 case PR_SWAP_CANCEL:
5626 case VCONN_SWAP_CANCEL:
5627 tcpm_swap_complete(port, port->swap_status);
5628 if (port->pwr_role == TYPEC_SOURCE)
5629 tcpm_set_state(port, SRC_READY, 0);
5631 tcpm_set_state(port, SNK_READY, 0);
5633 case FR_SWAP_CANCEL:
5634 if (port->pwr_role == TYPEC_SOURCE)
5635 tcpm_set_state(port, SRC_READY, 0);
5637 tcpm_set_state(port, SNK_READY, 0);
5641 switch (BDO_MODE_MASK(port->bist_request)) {
5642 case BDO_MODE_CARRIER2:
5643 tcpm_pd_transmit(port, TCPC_TX_BIST_MODE_2, NULL);
5644 tcpm_set_state(port, unattached_state(port),
5645 PD_T_BIST_CONT_MODE);
5647 case BDO_MODE_TESTDATA:
5648 if (port->tcpc->set_bist_data) {
5649 tcpm_log(port, "Enable BIST MODE TESTDATA");
5650 port->tcpc->set_bist_data(port->tcpc, true);
5657 case GET_STATUS_SEND:
5658 tcpm_pd_send_control(port, PD_CTRL_GET_STATUS, TCPC_TX_SOP);
5659 tcpm_set_state(port, GET_STATUS_SEND_TIMEOUT,
5660 PD_T_SENDER_RESPONSE);
5662 case GET_STATUS_SEND_TIMEOUT:
5663 tcpm_set_state(port, ready_state(port), 0);
5665 case GET_PPS_STATUS_SEND:
5666 tcpm_pd_send_control(port, PD_CTRL_GET_PPS_STATUS, TCPC_TX_SOP);
5667 tcpm_set_state(port, GET_PPS_STATUS_SEND_TIMEOUT,
5668 PD_T_SENDER_RESPONSE);
5670 case GET_PPS_STATUS_SEND_TIMEOUT:
5671 tcpm_set_state(port, ready_state(port), 0);
5674 tcpm_pd_send_control(port, PD_CTRL_GET_SINK_CAP, TCPC_TX_SOP);
5675 tcpm_set_state(port, GET_SINK_CAP_TIMEOUT, PD_T_SENDER_RESPONSE);
5677 case GET_SINK_CAP_TIMEOUT:
5678 port->sink_cap_done = true;
5679 tcpm_set_state(port, ready_state(port), 0);
5681 case ERROR_RECOVERY:
5682 tcpm_swap_complete(port, -EPROTO);
5683 tcpm_pps_complete(port, -EPROTO);
5684 tcpm_set_state(port, PORT_RESET, 0);
5687 tcpm_reset_port(port);
5688 if (port->self_powered)
5689 tcpm_set_cc(port, TYPEC_CC_OPEN);
5691 tcpm_set_cc(port, tcpm_default_state(port) == SNK_UNATTACHED ?
5692 TYPEC_CC_RD : tcpm_rp_cc(port));
5693 tcpm_set_state(port, PORT_RESET_WAIT_OFF,
5694 PD_T_ERROR_RECOVERY);
5696 case PORT_RESET_WAIT_OFF:
5697 tcpm_set_state(port,
5698 tcpm_default_state(port),
5699 port->vbus_present ? port->timings.ps_src_off_time : 0);
5702 /* AMS intermediate state */
5704 if (port->upcoming_state == INVALID_STATE) {
5705 tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
5706 SRC_READY : SNK_READY, 0);
5710 upcoming_state = port->upcoming_state;
5711 port->upcoming_state = INVALID_STATE;
5712 tcpm_set_state(port, upcoming_state, 0);
5716 case CHUNK_NOT_SUPP:
5717 tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP, TCPC_TX_SOP);
5718 tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ? SRC_READY : SNK_READY, 0);
5722 case SRC_VDM_IDENTITY_REQUEST:
5723 port->send_discover_prime = true;
5724 port->tx_sop_type = TCPC_TX_SOP_PRIME;
5725 mod_send_discover_delayed_work(port, 0);
5726 port->upcoming_state = SRC_SEND_CAPABILITIES;
5730 WARN(1, "Unexpected port state %d\n", port->state);
5735 static void tcpm_state_machine_work(struct kthread_work *work)
5737 struct tcpm_port *port = container_of(work, struct tcpm_port, state_machine);
5738 enum tcpm_state prev_state;
5740 mutex_lock(&port->lock);
5741 port->state_machine_running = true;
5743 if (port->queued_message && tcpm_send_queued_message(port))
5746 /* If we were queued due to a delayed state change, update it now */
5747 if (port->delayed_state) {
5748 tcpm_log(port, "state change %s -> %s [delayed %ld ms]",
5749 tcpm_states[port->state],
5750 tcpm_states[port->delayed_state], port->delay_ms);
5751 port->prev_state = port->state;
5752 port->state = port->delayed_state;
5753 port->delayed_state = INVALID_STATE;
5757 * Continue running as long as we have (non-delayed) state changes
5761 prev_state = port->state;
5762 run_state_machine(port);
5763 if (port->queued_message)
5764 tcpm_send_queued_message(port);
5765 } while (port->state != prev_state && !port->delayed_state);
5768 port->state_machine_running = false;
5769 mutex_unlock(&port->lock);
5772 static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
5773 enum typec_cc_status cc2)
5775 enum typec_cc_status old_cc1, old_cc2;
5776 enum tcpm_state new_state;
5778 old_cc1 = port->cc1;
5779 old_cc2 = port->cc2;
5783 tcpm_log_force(port,
5784 "CC1: %u -> %u, CC2: %u -> %u [state %s, polarity %d, %s]",
5785 old_cc1, cc1, old_cc2, cc2, tcpm_states[port->state],
5787 tcpm_port_is_disconnected(port) ? "disconnected"
5790 switch (port->state) {
5792 if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
5793 tcpm_port_is_source(port))
5794 tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
5795 else if (tcpm_port_is_sink(port))
5796 tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
5798 case CHECK_CONTAMINANT:
5799 /* Wait for Toggling to be resumed */
5801 case SRC_UNATTACHED:
5802 case ACC_UNATTACHED:
5803 if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
5804 tcpm_port_is_source(port))
5805 tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
5807 case SRC_ATTACH_WAIT:
5808 if (tcpm_port_is_disconnected(port) ||
5809 tcpm_port_is_audio_detached(port))
5810 tcpm_set_state(port, SRC_UNATTACHED, 0);
5811 else if (cc1 != old_cc1 || cc2 != old_cc2)
5812 tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
5816 case SRC_SEND_CAPABILITIES:
5818 if (tcpm_port_is_disconnected(port) ||
5819 !tcpm_port_is_source(port)) {
5820 if (port->port_type == TYPEC_PORT_SRC)
5821 tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port));
5823 tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
5826 case SNK_UNATTACHED:
5827 if (tcpm_port_is_sink(port))
5828 tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
5830 case SNK_ATTACH_WAIT:
5831 if ((port->cc1 == TYPEC_CC_OPEN &&
5832 port->cc2 != TYPEC_CC_OPEN) ||
5833 (port->cc1 != TYPEC_CC_OPEN &&
5834 port->cc2 == TYPEC_CC_OPEN))
5835 new_state = SNK_DEBOUNCED;
5836 else if (tcpm_port_is_disconnected(port))
5837 new_state = SNK_UNATTACHED;
5840 if (new_state != port->delayed_state)
5841 tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
5844 if (tcpm_port_is_disconnected(port))
5845 new_state = SNK_UNATTACHED;
5846 else if (port->vbus_present)
5847 new_state = tcpm_try_src(port) ? SRC_TRY : SNK_ATTACHED;
5849 new_state = SNK_UNATTACHED;
5850 if (new_state != port->delayed_state)
5851 tcpm_set_state(port, SNK_DEBOUNCED, 0);
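/*
 * Note: both cases above only re-enter their current state (restarting the
 * debounce) when the predicted target differs from the transition already
 * scheduled; a stable prediction lets the pending debounce timer run to
 * completion.
 */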
5855 * EXIT condition is based primarily on vbus disconnect and CC is secondary.
5856 * "A port that has entered into USB PD communications with the Source and
5857 * has seen the CC voltage exceed vRd-USB may monitor the CC pin to detect
5858 * cable disconnect in addition to monitoring VBUS.
5860 * A port that is monitoring the CC voltage for disconnect (but is not in
5861 * the process of a USB PD PR_Swap or USB PD FR_Swap) shall transition to
5862 * Unattached.SNK within tSinkDisconnect after the CC voltage remains below
5863 * vRd-USB for tPDDebounce."
5865 * When set_auto_vbus_discharge_threshold is enabled, CC pins go
5866 * away before vbus decays to disconnect threshold. Allow
5867 * disconnect to be driven by vbus disconnect when auto vbus
5868 * discharge is enabled.
5870 if (!port->auto_vbus_discharge_enabled && tcpm_port_is_disconnected(port))
5871 tcpm_set_state(port, unattached_state(port), 0);
5872 else if (!port->pd_capable &&
5873 (cc1 != old_cc1 || cc2 != old_cc2))
5874 tcpm_set_current_limit(port,
5875 tcpm_get_current_limit(port),
5879 case AUDIO_ACC_ATTACHED:
5880 if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
5881 tcpm_set_state(port, AUDIO_ACC_DEBOUNCE, 0);
5883 case AUDIO_ACC_DEBOUNCE:
5884 if (tcpm_port_is_audio(port))
5885 tcpm_set_state(port, AUDIO_ACC_ATTACHED, 0);
5888 case DEBUG_ACC_ATTACHED:
5889 if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
5890 tcpm_set_state(port, ACC_UNATTACHED, 0);
5894 /* Do nothing, waiting for timeout */
5898 /* CC line is unstable, wait for debounce */
5899 if (tcpm_port_is_disconnected(port))
5900 tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE, 0);
5902 case SNK_DISCOVERY_DEBOUNCE:
5906 /* Hand over to state machine if needed */
5907 if (!port->vbus_present && tcpm_port_is_source(port))
5908 tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
5910 case SRC_TRYWAIT_DEBOUNCE:
5911 if (port->vbus_present || !tcpm_port_is_source(port))
5912 tcpm_set_state(port, SRC_TRYWAIT, 0);
5914 case SNK_TRY_WAIT_DEBOUNCE:
5915 if (!tcpm_port_is_sink(port)) {
5917 tcpm_set_state(port, SRC_TRYWAIT, 0);
5921 if (tcpm_port_is_source(port))
5922 tcpm_set_state(port, SRC_TRY_DEBOUNCE, 0);
5924 case SRC_TRY_DEBOUNCE:
5925 tcpm_set_state(port, SRC_TRY_WAIT, 0);
5927 case SNK_TRYWAIT_DEBOUNCE:
5928 if (tcpm_port_is_sink(port))
5929 tcpm_set_state(port, SNK_TRYWAIT_VBUS, 0);
5931 case SNK_TRYWAIT_VBUS:
5932 if (!tcpm_port_is_sink(port))
5933 tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
5935 case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
5936 if (!tcpm_port_is_sink(port))
5937 tcpm_set_state(port, SRC_TRYWAIT, PD_T_TRY_CC_DEBOUNCE);
5939 tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS, 0);
5942 /* Do nothing, waiting for tCCDebounce */
5944 case PR_SWAP_SNK_SRC_SINK_OFF:
5945 case PR_SWAP_SRC_SNK_TRANSITION_OFF:
5946 case PR_SWAP_SRC_SNK_SOURCE_OFF:
5947 case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
5948 case PR_SWAP_SNK_SRC_SOURCE_ON:
5950 * CC state change is expected in PR_SWAP
5955 case FR_SWAP_SEND_TIMEOUT:
5956 case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
5957 case FR_SWAP_SNK_SRC_NEW_SINK_READY:
5958 case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
5959 /* Do nothing, CC change expected */
5963 case PORT_RESET_WAIT_OFF:
5965 * State set back to default mode once the timer completes.
5966 * Ignore CC changes here.
5971 * While acting as sink and auto vbus discharge is enabled, allow disconnect
5972 * to be driven by vbus disconnect.
5974 if (tcpm_port_is_disconnected(port) && !(port->pwr_role == TYPEC_SINK &&
5975 port->auto_vbus_discharge_enabled))
5976 tcpm_set_state(port, unattached_state(port), 0);
5981 static void _tcpm_pd_vbus_on(struct tcpm_port *port)
5983 tcpm_log_force(port, "VBUS on");
5984 port->vbus_present = true;
5986 * When vbus_present is true, i.e. the voltage at VBUS is greater than VSAFE5V, it
5987 * implicitly means that vbus is not at VSAFE0V, hence clear the vbus_vsafe0v flag here.
5989 port->vbus_vsafe0v = false;
5991 switch (port->state) {
5992 case SNK_TRANSITION_SINK_VBUS:
5993 port->explicit_contract = true;
5994 tcpm_set_state(port, SNK_READY, 0);
5997 tcpm_set_state(port, SNK_DISCOVERY, 0);
6001 tcpm_set_state(port, tcpm_try_src(port) ? SRC_TRY
6005 case SNK_HARD_RESET_WAIT_VBUS:
6006 tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, 0);
6009 tcpm_set_state(port, SRC_STARTUP, 0);
6011 case SRC_HARD_RESET_VBUS_ON:
6012 tcpm_set_state(port, SRC_STARTUP, 0);
6016 /* Do nothing, waiting for timeout */
6019 /* Do nothing, waiting for Rd to be detected */
6021 case SRC_TRYWAIT_DEBOUNCE:
6022 tcpm_set_state(port, SRC_TRYWAIT, 0);
6024 case SNK_TRY_WAIT_DEBOUNCE:
6025 /* Do nothing, waiting for PD_DEBOUNCE to be done */
6028 /* Do nothing, waiting for tCCDebounce */
6030 case SNK_TRYWAIT_VBUS:
6031 if (tcpm_port_is_sink(port))
6032 tcpm_set_state(port, SNK_ATTACHED, 0);
6034 case SNK_TRYWAIT_DEBOUNCE:
6035 /* Do nothing, waiting for Rp */
6037 case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
6038 if (port->vbus_present && tcpm_port_is_sink(port))
6039 tcpm_set_state(port, SNK_ATTACHED, 0);
6042 case SRC_TRY_DEBOUNCE:
6043 /* Do nothing, waiting for sink detection */
6046 case FR_SWAP_SEND_TIMEOUT:
6047 case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
6048 case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
6049 if (port->tcpc->frs_sourcing_vbus)
6050 port->tcpc->frs_sourcing_vbus(port->tcpc);
6052 case FR_SWAP_SNK_SRC_NEW_SINK_READY:
6053 if (port->tcpc->frs_sourcing_vbus)
6054 port->tcpc->frs_sourcing_vbus(port->tcpc);
6055 tcpm_set_state(port, FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, 0);
6059 case PORT_RESET_WAIT_OFF:
6061 * State set back to default mode once the timer completes.
6062 * Ignore vbus changes here.
6071 static void _tcpm_pd_vbus_off(struct tcpm_port *port)
6073 tcpm_log_force(port, "VBUS off");
6074 port->vbus_present = false;
6075 port->vbus_never_low = false;
6076 switch (port->state) {
6077 case SNK_HARD_RESET_SINK_OFF:
6078 tcpm_set_state(port, SNK_HARD_RESET_WAIT_VBUS, 0);
6080 case HARD_RESET_SEND:
6083 /* Do nothing, waiting for timeout */
6086 /* Hand over to state machine if needed */
6087 if (tcpm_port_is_source(port))
6088 tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
6090 case SNK_TRY_WAIT_DEBOUNCE:
6091 /* Do nothing, waiting for PD_DEBOUNCE to be done */
6094 case SNK_TRYWAIT_VBUS:
6095 case SNK_TRYWAIT_DEBOUNCE:
6097 case SNK_ATTACH_WAIT:
6099 /* Do nothing, as TCPM is still waiting for vbus to reach VSAFE5V to connect */
6102 case SNK_NEGOTIATE_CAPABILITIES:
6105 case PR_SWAP_SRC_SNK_TRANSITION_OFF:
6106 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF, 0);
6109 case PR_SWAP_SNK_SRC_SINK_OFF:
6110 /* Do nothing, expected */
6113 case PR_SWAP_SNK_SRC_SOURCE_ON:
6115 * Do nothing when vbus off notification is received.
6116 * TCPM can wait for PD_T_NEWSRC in PR_SWAP_SNK_SRC_SOURCE_ON
6117 * for the vbus source to ramp up.
6121 case PORT_RESET_WAIT_OFF:
6122 tcpm_set_state(port, tcpm_default_state(port), 0);
6126 case SRC_TRY_DEBOUNCE:
6127 /* Do nothing, waiting for sink detection */
6131 case SRC_SEND_CAPABILITIES:
6132 case SRC_SEND_CAPABILITIES_TIMEOUT:
6133 case SRC_NEGOTIATE_CAPABILITIES:
6134 case SRC_TRANSITION_SUPPLY:
6136 case SRC_WAIT_NEW_CAPABILITIES:
6138 * Force to unattached state to re-initiate connection.
6139 * DRP port should move to Unattached.SNK instead of Unattached.SRC if
6140 * sink removed. Although sink removal here is due to source's vbus collapse,
6141 * treat it the same way for consistency.
6143 if (port->port_type == TYPEC_PORT_SRC)
6144 tcpm_set_state(port, SRC_UNATTACHED, tcpm_wait_for_discharge(port));
6146 tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
6151 * State set back to default mode once the timer completes.
6152 * Ignore vbus changes here.
6157 case FR_SWAP_SEND_TIMEOUT:
6158 case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
6159 case FR_SWAP_SNK_SRC_NEW_SINK_READY:
6160 case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
6161 /* Do nothing, vbus drop expected */
6164 case SNK_HARD_RESET_WAIT_VBUS:
6165 /* Do nothing, it's OK to receive vbus off events */
6169 if (port->pwr_role == TYPEC_SINK && port->attached)
6170 tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
6175 static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port)
6177 tcpm_log_force(port, "VBUS VSAFE0V");
6178 port->vbus_vsafe0v = true;
6179 switch (port->state) {
6180 case SRC_HARD_RESET_VBUS_OFF:
6182 * After establishing the vSafe0V voltage condition on VBUS, the Source Shall wait
6183 * tSrcRecover before re-applying VCONN and restoring VBUS to vSafe5V.
6185 tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
6187 case SRC_ATTACH_WAIT:
6188 if (tcpm_port_is_source(port))
6189 tcpm_set_state(port, tcpm_try_snk(port) ? SNK_TRY : SRC_ATTACHED,
6190 port->timings.cc_debounce_time);
6193 case SRC_SEND_CAPABILITIES:
6194 case SRC_SEND_CAPABILITIES_TIMEOUT:
6195 case SRC_NEGOTIATE_CAPABILITIES:
6196 case SRC_TRANSITION_SUPPLY:
6198 case SRC_WAIT_NEW_CAPABILITIES:
6199 if (port->auto_vbus_discharge_enabled) {
6200 if (port->port_type == TYPEC_PORT_SRC)
6201 tcpm_set_state(port, SRC_UNATTACHED, 0);
6203 tcpm_set_state(port, SNK_UNATTACHED, 0);
6206 case PR_SWAP_SNK_SRC_SINK_OFF:
6207 case PR_SWAP_SNK_SRC_SOURCE_ON:
6208 /* Do nothing, vsafe0v is expected during transition */
6210 case SNK_ATTACH_WAIT:
6212 /* Do nothing, still waiting for VSAFE5V to connect */
6214 case SNK_HARD_RESET_WAIT_VBUS:
6215 /* Do nothing, it's OK to receive vbus off events */
6218 if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled)
6219 tcpm_set_state(port, SNK_UNATTACHED, 0);
6224 static void _tcpm_pd_hard_reset(struct tcpm_port *port)
6226 tcpm_log_force(port, "Received hard reset");
6227 if (port->bist_request == BDO_MODE_TESTDATA && port->tcpc->set_bist_data)
6228 port->tcpc->set_bist_data(port->tcpc, false);
6230 switch (port->state) {
6232 case ERROR_RECOVERY:
6234 case PORT_RESET_WAIT_OFF:
6240 if (port->ams != NONE_AMS)
6241 port->ams = NONE_AMS;
6242 if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
6243 port->ams = HARD_RESET;
6245 * If we keep receiving hard reset requests, executing the hard reset
6246 * must have failed. Revert to error recovery if that happens.
6248 tcpm_set_state(port,
6249 port->hard_reset_count < PD_N_HARD_RESET_COUNT ?
6250 HARD_RESET_START : ERROR_RECOVERY,
6254 static void tcpm_pd_event_handler(struct kthread_work *work)
6256 struct tcpm_port *port = container_of(work, struct tcpm_port,
6260 mutex_lock(&port->lock);
6262 spin_lock(&port->pd_event_lock);
6263 while (port->pd_events) {
6264 events = port->pd_events;
6265 port->pd_events = 0;
6266 spin_unlock(&port->pd_event_lock);
6267 if (events & TCPM_RESET_EVENT)
6268 _tcpm_pd_hard_reset(port);
6269 if (events & TCPM_VBUS_EVENT) {
6272 vbus = port->tcpc->get_vbus(port->tcpc);
6274 _tcpm_pd_vbus_on(port);
6276 _tcpm_pd_vbus_off(port);
6278 * When TCPC does not support detecting vsafe0v voltage level,
6279 * treat vbus absent as vsafe0v. Else invoke is_vbus_vsafe0v
6280 * to see if vbus has discharged to VSAFE0V.
6282 if (!port->tcpc->is_vbus_vsafe0v ||
6283 port->tcpc->is_vbus_vsafe0v(port->tcpc))
6284 _tcpm_pd_vbus_vsafe0v(port);
6287 if (events & TCPM_CC_EVENT) {
6288 enum typec_cc_status cc1, cc2;
6290 if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
6291 _tcpm_cc_change(port, cc1, cc2);
6293 if (events & TCPM_FRS_EVENT) {
6294 if (port->state == SNK_READY) {
6297 port->upcoming_state = FR_SWAP_SEND;
6298 ret = tcpm_ams_start(port, FAST_ROLE_SWAP);
6300 port->upcoming_state = INVALID_STATE;
6302 tcpm_log(port, "Discarding FRS_SIGNAL! Not in sink ready");
6305 if (events & TCPM_SOURCING_VBUS) {
6306 tcpm_log(port, "sourcing vbus");
6308 * In the fast role swap case the TCPC autonomously sources vbus. Set vbus_source
6309 * true as TCPM wouldn't have called tcpm_set_vbus.
6311 * When vbus is sourced at the command of TCPM, i.e. TCPM called
6312 * tcpm_set_vbus to source vbus, vbus_source would already be true.
6314 port->vbus_source = true;
6315 _tcpm_pd_vbus_on(port);
6317 if (events & TCPM_PORT_CLEAN) {
6318 tcpm_log(port, "port clean");
6319 if (port->state == CHECK_CONTAMINANT) {
6320 if (tcpm_start_toggling(port, tcpm_rp_cc(port)))
6321 tcpm_set_state(port, TOGGLING, 0);
6323 tcpm_set_state(port, tcpm_default_state(port), 0);
6326 if (events & TCPM_PORT_ERROR) {
6327 tcpm_log(port, "port triggering error recovery");
6328 tcpm_set_state(port, ERROR_RECOVERY, 0);
6331 spin_lock(&port->pd_event_lock);
6333 spin_unlock(&port->pd_event_lock);
6334 mutex_unlock(&port->lock);
6337 void tcpm_cc_change(struct tcpm_port *port)
6339 spin_lock(&port->pd_event_lock);
6340 port->pd_events |= TCPM_CC_EVENT;
6341 spin_unlock(&port->pd_event_lock);
6342 kthread_queue_work(port->wq, &port->event_work);
6344 EXPORT_SYMBOL_GPL(tcpm_cc_change);
6346 void tcpm_vbus_change(struct tcpm_port *port)
6348 spin_lock(&port->pd_event_lock);
6349 port->pd_events |= TCPM_VBUS_EVENT;
6350 spin_unlock(&port->pd_event_lock);
6351 kthread_queue_work(port->wq, &port->event_work);
6353 EXPORT_SYMBOL_GPL(tcpm_vbus_change);
6355 void tcpm_pd_hard_reset(struct tcpm_port *port)
6357 spin_lock(&port->pd_event_lock);
6358 port->pd_events = TCPM_RESET_EVENT;
6359 spin_unlock(&port->pd_event_lock);
6360 kthread_queue_work(port->wq, &port->event_work);
6362 EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset);
6364 void tcpm_sink_frs(struct tcpm_port *port)
6366 spin_lock(&port->pd_event_lock);
6367 port->pd_events |= TCPM_FRS_EVENT;
6368 spin_unlock(&port->pd_event_lock);
6369 kthread_queue_work(port->wq, &port->event_work);
6371 EXPORT_SYMBOL_GPL(tcpm_sink_frs);
6373 void tcpm_sourcing_vbus(struct tcpm_port *port)
6375 spin_lock(&port->pd_event_lock);
6376 port->pd_events |= TCPM_SOURCING_VBUS;
6377 spin_unlock(&port->pd_event_lock);
6378 kthread_queue_work(port->wq, &port->event_work);
6380 EXPORT_SYMBOL_GPL(tcpm_sourcing_vbus);
6382 void tcpm_port_clean(struct tcpm_port *port)
6384 spin_lock(&port->pd_event_lock);
6385 port->pd_events |= TCPM_PORT_CLEAN;
6386 spin_unlock(&port->pd_event_lock);
6387 kthread_queue_work(port->wq, &port->event_work);
6389 EXPORT_SYMBOL_GPL(tcpm_port_clean);
6391 bool tcpm_port_is_toggling(struct tcpm_port *port)
6393 return port->port_type == TYPEC_PORT_DRP && port->state == TOGGLING;
6395 EXPORT_SYMBOL_GPL(tcpm_port_is_toggling);
6397 void tcpm_port_error_recovery(struct tcpm_port *port)
6399 spin_lock(&port->pd_event_lock);
6400 port->pd_events |= TCPM_PORT_ERROR;
6401 spin_unlock(&port->pd_event_lock);
6402 kthread_queue_work(port->wq, &port->event_work);
6404 EXPORT_SYMBOL_GPL(tcpm_port_error_recovery);
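/*
 * Illustrative sketch, not part of the driver: the exported helpers above all
 * share one producer pattern - set an event bit under pd_event_lock and kick
 * the port's kthread worker, leaving tcpm_pd_event_handler() to drain the
 * bits under port->lock. (tcpm_pd_hard_reset() is the exception in that it
 * assigns rather than ORs, discarding anything still queued.) A hypothetical
 * helper factoring the pattern out could look like this:
 */
static void tcpm_queue_pd_event(struct tcpm_port *port, u32 event)
{
	spin_lock(&port->pd_event_lock);
	port->pd_events |= event;	/* accumulate with any pending events */
	spin_unlock(&port->pd_event_lock);
	kthread_queue_work(port->wq, &port->event_work);
}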
6406 static void tcpm_enable_frs_work(struct kthread_work *work)
6408 struct tcpm_port *port = container_of(work, struct tcpm_port, enable_frs);
6411 mutex_lock(&port->lock);
6412 /* Not FRS capable */
6413 if (!port->connected || port->port_type != TYPEC_PORT_DRP ||
6414 port->pwr_opmode != TYPEC_PWR_MODE_PD ||
6415 !port->tcpc->enable_frs ||
6416 /* Sink caps queried */
6417 port->sink_cap_done || port->negotiated_rev < PD_REV30)
6420 /* Send when the state machine is idle */
6421 if (port->state != SNK_READY || port->vdm_sm_running || port->send_discover ||
6422 port->send_discover_prime)
6425 port->upcoming_state = GET_SINK_CAP;
6426 ret = tcpm_ams_start(port, GET_SINK_CAPABILITIES);
6427 if (ret == -EAGAIN) {
6428 port->upcoming_state = INVALID_STATE;
6430 port->sink_cap_done = true;
6434 mod_enable_frs_delayed_work(port, GET_SINK_CAP_RETRY_MS);
6436 mutex_unlock(&port->lock);
6439 static void tcpm_send_discover_work(struct kthread_work *work)
6441 struct tcpm_port *port = container_of(work, struct tcpm_port, send_discover_work);
6443 mutex_lock(&port->lock);
6444 /* No need to send DISCOVER_IDENTITY anymore */
6445 if (!port->send_discover && !port->send_discover_prime)
6448 if (port->data_role == TYPEC_DEVICE && port->negotiated_rev < PD_REV30) {
6449 port->send_discover = false;
6450 port->send_discover_prime = false;
6454 /* Retry if the port is not idle */
6455 if ((port->state != SRC_READY && port->state != SNK_READY &&
6456 port->state != SRC_VDM_IDENTITY_REQUEST) || port->vdm_sm_running) {
6457 mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
6461 tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0, port->tx_sop_type);
6464 mutex_unlock(&port->lock);
6467 static int tcpm_dr_set(struct typec_port *p, enum typec_data_role data)
6469 struct tcpm_port *port = typec_get_drvdata(p);
6472 mutex_lock(&port->swap_lock);
6473 mutex_lock(&port->lock);
6475 if (port->typec_caps.data != TYPEC_PORT_DRD) {
6479 if (port->state != SRC_READY && port->state != SNK_READY) {
6484 if (port->data_role == data) {
6491 * 6.3.9: If an alternate mode is active, a request to swap
6492 * alternate modes shall trigger a port reset.
6493 * Reject data role swap request in this case.
6496 if (!port->pd_capable) {
6498 * If the partner is not PD capable, reset the port to
6499 * trigger a role change. This can only work if a preferred
6500 * role is configured, and if it matches the requested role.
6502 if (port->try_role == TYPEC_NO_PREFERRED_ROLE ||
6503 port->try_role == port->pwr_role) {
6507 port->non_pd_role_swap = true;
6508 tcpm_set_state(port, PORT_RESET, 0);
6510 port->upcoming_state = DR_SWAP_SEND;
6511 ret = tcpm_ams_start(port, DATA_ROLE_SWAP);
6512 if (ret == -EAGAIN) {
6513 port->upcoming_state = INVALID_STATE;
6518 port->swap_status = 0;
6519 port->swap_pending = true;
6520 reinit_completion(&port->swap_complete);
6521 mutex_unlock(&port->lock);
6523 if (!wait_for_completion_timeout(&port->swap_complete,
6524 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
6527 ret = port->swap_status;
6529 port->non_pd_role_swap = false;
6533 mutex_unlock(&port->lock);
6535 mutex_unlock(&port->swap_lock);
6539 static int tcpm_pr_set(struct typec_port *p, enum typec_role role)
6541 struct tcpm_port *port = typec_get_drvdata(p);
6544 mutex_lock(&port->swap_lock);
6545 mutex_lock(&port->lock);
6547 if (port->port_type != TYPEC_PORT_DRP) {
6551 if (port->state != SRC_READY && port->state != SNK_READY) {
6556 if (role == port->pwr_role) {
6561 port->upcoming_state = PR_SWAP_SEND;
6562 ret = tcpm_ams_start(port, POWER_ROLE_SWAP);
6563 if (ret == -EAGAIN) {
6564 port->upcoming_state = INVALID_STATE;
6568 port->swap_status = 0;
6569 port->swap_pending = true;
6570 reinit_completion(&port->swap_complete);
6571 mutex_unlock(&port->lock);
6573 if (!wait_for_completion_timeout(&port->swap_complete,
6574 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
6577 ret = port->swap_status;
6582 mutex_unlock(&port->lock);
6584 mutex_unlock(&port->swap_lock);
6588 static int tcpm_vconn_set(struct typec_port *p, enum typec_role role)
6590 struct tcpm_port *port = typec_get_drvdata(p);
6593 mutex_lock(&port->swap_lock);
6594 mutex_lock(&port->lock);
6596 if (port->state != SRC_READY && port->state != SNK_READY) {
6601 if (role == port->vconn_role) {
6606 port->upcoming_state = VCONN_SWAP_SEND;
6607 ret = tcpm_ams_start(port, VCONN_SWAP);
6608 if (ret == -EAGAIN) {
6609 port->upcoming_state = INVALID_STATE;
6613 port->swap_status = 0;
6614 port->swap_pending = true;
6615 reinit_completion(&port->swap_complete);
6616 mutex_unlock(&port->lock);
6618 if (!wait_for_completion_timeout(&port->swap_complete,
6619 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
6622 ret = port->swap_status;
6627 mutex_unlock(&port->lock);
6629 mutex_unlock(&port->swap_lock);
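/*
 * Illustrative sketch, not part of the driver: tcpm_dr_set(), tcpm_pr_set()
 * and tcpm_vconn_set() above all finish the same way - mark a swap pending,
 * drop port->lock and sleep on swap_complete until tcpm_swap_complete() fills
 * in swap_status or PD_ROLE_SWAP_TIMEOUT expires. A hypothetical helper for
 * that common tail (called with swap_lock held and port->lock released):
 */
static int tcpm_wait_for_swap(struct tcpm_port *port)
{
	if (!wait_for_completion_timeout(&port->swap_complete,
					 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
		return -ETIMEDOUT;	/* the partner never answered in time */
	return port->swap_status;	/* 0 on success, negative errno otherwise */
}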
6633 static int tcpm_try_role(struct typec_port *p, int role)
6635 struct tcpm_port *port = typec_get_drvdata(p);
6636 struct tcpc_dev *tcpc = port->tcpc;
6639 mutex_lock(&port->lock);
6641 ret = tcpc->try_role(tcpc, role);
6643 port->try_role = role;
6644 port->try_src_count = 0;
6645 port->try_snk_count = 0;
6646 mutex_unlock(&port->lock);
6651 static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 req_op_curr)
6653 unsigned int target_mw;
6656 mutex_lock(&port->swap_lock);
6657 mutex_lock(&port->lock);
6659 if (!port->pps_data.active) {
6664 if (port->state != SNK_READY) {
6669 if (req_op_curr > port->pps_data.max_curr) {
6674 target_mw = (req_op_curr * port->supply_voltage) / 1000;
6675 if (target_mw < port->operating_snk_mw) {
6680 port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
6681 ret = tcpm_ams_start(port, POWER_NEGOTIATION);
6682 if (ret == -EAGAIN) {
6683 port->upcoming_state = INVALID_STATE;
6687 /* Round down operating current to align with PPS valid steps */
6688 req_op_curr = req_op_curr - (req_op_curr % RDO_PROG_CURR_MA_STEP);
6690 reinit_completion(&port->pps_complete);
6691 port->pps_data.req_op_curr = req_op_curr;
6692 port->pps_status = 0;
6693 port->pps_pending = true;
6694 mutex_unlock(&port->lock);
6696 if (!wait_for_completion_timeout(&port->pps_complete,
6697 msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
6700 ret = port->pps_status;
6705 mutex_unlock(&port->lock);
6707 mutex_unlock(&port->swap_lock);
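/*
 * Worked example for the rounding in tcpm_pps_set_op_curr() above (values are
 * illustrative): with RDO_PROG_CURR_MA_STEP (50 mA, matching the PPS RDO
 * current granularity), a requested operating current of 2270 mA is rounded
 * down to 2250 mA. The earlier target_mw check multiplies the unrounded
 * request [mA] by supply_voltage [mV] and divides by 1000, e.g.
 * (2270 * 9000) / 1000 = 20430 mW, which must not fall below
 * port->operating_snk_mw.
 */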
6712 static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 req_out_volt)
6714 unsigned int target_mw;
6717 mutex_lock(&port->swap_lock);
6718 mutex_lock(&port->lock);
6720 if (!port->pps_data.active) {
6725 if (port->state != SNK_READY) {
6730 target_mw = (port->current_limit * req_out_volt) / 1000;
6731 if (target_mw < port->operating_snk_mw) {
6736 port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
6737 ret = tcpm_ams_start(port, POWER_NEGOTIATION);
6738 if (ret == -EAGAIN) {
6739 port->upcoming_state = INVALID_STATE;
6743 /* Round down output voltage to align with PPS valid steps */
6744 req_out_volt = req_out_volt - (req_out_volt % RDO_PROG_VOLT_MV_STEP);
6746 reinit_completion(&port->pps_complete);
6747 port->pps_data.req_out_volt = req_out_volt;
6748 port->pps_status = 0;
6749 port->pps_pending = true;
6750 mutex_unlock(&port->lock);
6752 if (!wait_for_completion_timeout(&port->pps_complete,
6753 msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
6756 ret = port->pps_status;
6761 mutex_unlock(&port->lock);
6763 mutex_unlock(&port->swap_lock);
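/*
 * Same idea for the voltage path above: RDO_PROG_VOLT_MV_STEP is 20 mV, so an
 * illustrative request of 5150 mV is rounded down to 5140 mV, and target_mw is
 * current_limit [mA] * req_out_volt [mV] / 1000, i.e. milliwatts.
 */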
6768 static int tcpm_pps_activate(struct tcpm_port *port, bool activate)
6772 mutex_lock(&port->swap_lock);
6773 mutex_lock(&port->lock);
6775 if (!port->pps_data.supported) {
6780	/* Trying to deactivate PPS when it is already deactivated, so just bail */
6781 if (!port->pps_data.active && !activate)
6784 if (port->state != SNK_READY) {
6790 port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
6792 port->upcoming_state = SNK_NEGOTIATE_CAPABILITIES;
6793 ret = tcpm_ams_start(port, POWER_NEGOTIATION);
6794 if (ret == -EAGAIN) {
6795 port->upcoming_state = INVALID_STATE;
6799 reinit_completion(&port->pps_complete);
6800 port->pps_status = 0;
6801 port->pps_pending = true;
6803 /* Trigger PPS request or move back to standard PDO contract */
6805 port->pps_data.req_out_volt = port->supply_voltage;
6806 port->pps_data.req_op_curr = port->current_limit;
6808 mutex_unlock(&port->lock);
6810 if (!wait_for_completion_timeout(&port->pps_complete,
6811 msecs_to_jiffies(PD_PPS_CTRL_TIMEOUT)))
6814 ret = port->pps_status;
6819 mutex_unlock(&port->lock);
6821 mutex_unlock(&port->swap_lock);
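/*
 * PPS activation is normally driven through the port power supply registered
 * below: writing TCPM_PSY_PROG_ONLINE (2) to the "online" property ends up in
 * tcpm_pps_activate(port, true), while TCPM_PSY_FIXED_ONLINE (1) requests a
 * return to a fixed PDO contract via tcpm_pps_activate(port, false); see
 * tcpm_psy_set_online().
 */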
6826 static void tcpm_init(struct tcpm_port *port)
6828 enum typec_cc_status cc1, cc2;
6830 port->tcpc->init(port->tcpc);
6832 tcpm_reset_port(port);
6836 * Should possibly wait for VBUS to settle if it was enabled locally
6837 * since tcpm_reset_port() will disable VBUS.
6839 port->vbus_present = port->tcpc->get_vbus(port->tcpc);
6840 if (port->vbus_present)
6841 port->vbus_never_low = true;
6844 * 1. When vbus_present is true, voltage on VBUS is already at VSAFE5V.
6845 * So implicitly vbus_vsafe0v = false.
6847	 * 2. When vbus_present is false and the TCPC does NOT support querying
6848	 * the vsafe0v status, then it's best to assume VBUS is at VSAFE0V, i.e.
6849	 * vbus_vsafe0v is true.
6851	 * 3. When vbus_present is false and the TCPC does support querying vsafe0v,
6852	 * then query the TCPC for the vsafe0v status.
6854 if (port->vbus_present)
6855 port->vbus_vsafe0v = false;
6856 else if (!port->tcpc->is_vbus_vsafe0v)
6857 port->vbus_vsafe0v = true;
6859 port->vbus_vsafe0v = port->tcpc->is_vbus_vsafe0v(port->tcpc);
6861 tcpm_set_state(port, tcpm_default_state(port), 0);
6863 if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
6864 _tcpm_cc_change(port, cc1, cc2);
6867 * Some adapters need a clean slate at startup, and won't recover
6868 * otherwise. So do not try to be fancy and force a clean disconnect.
6870 tcpm_set_state(port, PORT_RESET, 0);
6873 static int tcpm_port_type_set(struct typec_port *p, enum typec_port_type type)
6875 struct tcpm_port *port = typec_get_drvdata(p);
6877 mutex_lock(&port->lock);
6878 if (type == port->port_type)
6881 port->port_type = type;
6883 if (!port->connected) {
6884 tcpm_set_state(port, PORT_RESET, 0);
6885 } else if (type == TYPEC_PORT_SNK) {
6886 if (!(port->pwr_role == TYPEC_SINK &&
6887 port->data_role == TYPEC_DEVICE))
6888 tcpm_set_state(port, PORT_RESET, 0);
6889 } else if (type == TYPEC_PORT_SRC) {
6890 if (!(port->pwr_role == TYPEC_SOURCE &&
6891 port->data_role == TYPEC_HOST))
6892 tcpm_set_state(port, PORT_RESET, 0);
6896 mutex_unlock(&port->lock);
6900 static struct pd_data *tcpm_find_pd_data(struct tcpm_port *port, struct usb_power_delivery *pd)
6904 for (i = 0; port->pd_list[i]; i++) {
6905 if (port->pd_list[i]->pd == pd)
6906 return port->pd_list[i];
6909 return ERR_PTR(-ENODATA);
6912 static struct usb_power_delivery **tcpm_pd_get(struct typec_port *p)
6914 struct tcpm_port *port = typec_get_drvdata(p);
6919 static int tcpm_pd_set(struct typec_port *p, struct usb_power_delivery *pd)
6921 struct tcpm_port *port = typec_get_drvdata(p);
6922 struct pd_data *data;
6925 mutex_lock(&port->lock);
6927 if (port->selected_pd == pd)
6930 data = tcpm_find_pd_data(port, pd);
6932 ret = PTR_ERR(data);
6936 if (data->sink_desc.pdo[0]) {
6937 for (i = 0; i < PDO_MAX_OBJECTS && data->sink_desc.pdo[i]; i++)
6938 port->snk_pdo[i] = data->sink_desc.pdo[i];
6939 port->nr_snk_pdo = i;
6940 port->operating_snk_mw = data->operating_snk_mw;
6943 if (data->source_desc.pdo[0]) {
6944 for (i = 0; i < PDO_MAX_OBJECTS && data->source_desc.pdo[i]; i++)
6945 port->src_pdo[i] = data->source_desc.pdo[i];
6946 port->nr_src_pdo = i;
6949 switch (port->state) {
6950 case SRC_UNATTACHED:
6951 case SRC_ATTACH_WAIT:
6953 tcpm_set_cc(port, tcpm_rp_cc(port));
6955 case SRC_SEND_CAPABILITIES:
6956 case SRC_SEND_CAPABILITIES_TIMEOUT:
6957 case SRC_NEGOTIATE_CAPABILITIES:
6959 case SRC_WAIT_NEW_CAPABILITIES:
6960 port->caps_count = 0;
6961 port->upcoming_state = SRC_SEND_CAPABILITIES;
6962 ret = tcpm_ams_start(port, POWER_NEGOTIATION);
6963 if (ret == -EAGAIN) {
6964 port->upcoming_state = INVALID_STATE;
6968 case SNK_NEGOTIATE_CAPABILITIES:
6969 case SNK_NEGOTIATE_PPS_CAPABILITIES:
6971 case SNK_TRANSITION_SINK:
6972 case SNK_TRANSITION_SINK_VBUS:
6973 if (port->pps_data.active)
6974 port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
6975 else if (port->pd_capable)
6976 port->upcoming_state = SNK_NEGOTIATE_CAPABILITIES;
6980 port->update_sink_caps = true;
6982 ret = tcpm_ams_start(port, POWER_NEGOTIATION);
6983 if (ret == -EAGAIN) {
6984 port->upcoming_state = INVALID_STATE;
6992 port->port_source_caps = data->source_cap;
6993 port->port_sink_caps = data->sink_cap;
6994 typec_port_set_usb_power_delivery(p, NULL);
6995 port->selected_pd = pd;
6996 typec_port_set_usb_power_delivery(p, port->selected_pd);
6998 mutex_unlock(&port->lock);
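/*
 * Ordering note for tcpm_pd_set() above: the selected PDO tables are copied
 * into port->snk_pdo/src_pdo first, then a POWER_NEGOTIATION AMS is kicked off
 * (SRC_SEND_CAPABILITIES when acting as source, SNK_NEGOTIATE_CAPABILITIES or
 * SNK_NEGOTIATE_PPS_CAPABILITIES when acting as sink) so the new capabilities
 * are renegotiated with the partner; finally the port's source/sink capability
 * pointers and the typec class selection are updated through
 * typec_port_set_usb_power_delivery().
 */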
7002 static const struct typec_operations tcpm_ops = {
7003 .try_role = tcpm_try_role,
7004 .dr_set = tcpm_dr_set,
7005 .pr_set = tcpm_pr_set,
7006 .vconn_set = tcpm_vconn_set,
7007 .port_type_set = tcpm_port_type_set,
7008 .pd_get = tcpm_pd_get,
7009 .pd_set = tcpm_pd_set
7012 void tcpm_tcpc_reset(struct tcpm_port *port)
7014 mutex_lock(&port->lock);
7015 /* XXX: Maintain PD connection if possible? */
7017 mutex_unlock(&port->lock);
7019 EXPORT_SYMBOL_GPL(tcpm_tcpc_reset);
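/*
 * Minimal usage sketch (hypothetical driver names, not part of this file): a
 * low-level TCPC driver whose controller lost state, e.g. after a chip-level
 * reset, can ask TCPM to re-synchronise the port afterwards:
 *
 *	if (foo_controller_recovered(chip))
 *		tcpm_tcpc_reset(chip->tcpm_port);
 */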
7021 static void tcpm_port_unregister_pd(struct tcpm_port *port)
7025 port->port_sink_caps = NULL;
7026 port->port_source_caps = NULL;
7027 for (i = 0; i < port->pd_count; i++) {
7028 usb_power_delivery_unregister_capabilities(port->pd_list[i]->sink_cap);
7029 usb_power_delivery_unregister_capabilities(port->pd_list[i]->source_cap);
7030 devm_kfree(port->dev, port->pd_list[i]);
7031 port->pd_list[i] = NULL;
7032 usb_power_delivery_unregister(port->pds[i]);
7033 port->pds[i] = NULL;
7037 static int tcpm_port_register_pd(struct tcpm_port *port)
7039 struct usb_power_delivery_desc desc = { port->typec_caps.pd_revision };
7040 struct usb_power_delivery_capabilities *cap;
7043 if (!port->nr_src_pdo && !port->nr_snk_pdo)
7046 for (i = 0; i < port->pd_count; i++) {
7047 port->pds[i] = usb_power_delivery_register(port->dev, &desc);
7048 if (IS_ERR(port->pds[i])) {
7049 ret = PTR_ERR(port->pds[i]);
7050 goto err_unregister;
7052 port->pd_list[i]->pd = port->pds[i];
7054 if (port->pd_list[i]->source_desc.pdo[0]) {
7055 cap = usb_power_delivery_register_capabilities(port->pds[i],
7056 &port->pd_list[i]->source_desc);
7059 goto err_unregister;
7061 port->pd_list[i]->source_cap = cap;
7064 if (port->pd_list[i]->sink_desc.pdo[0]) {
7065 cap = usb_power_delivery_register_capabilities(port->pds[i],
7066 &port->pd_list[i]->sink_desc);
7069 goto err_unregister;
7071 port->pd_list[i]->sink_cap = cap;
7075 port->port_source_caps = port->pd_list[0]->source_cap;
7076 port->port_sink_caps = port->pd_list[0]->sink_cap;
7077 port->selected_pd = port->pds[0];
7081 tcpm_port_unregister_pd(port);
7086 static void tcpm_fw_get_timings(struct tcpm_port *port, struct fwnode_handle *fwnode)
7091 ret = fwnode_property_read_u32(fwnode, "sink-wait-cap-time-ms", &val);
7093 port->timings.sink_wait_cap_time = val;
7095 port->timings.sink_wait_cap_time = PD_T_SINK_WAIT_CAP;
7097 ret = fwnode_property_read_u32(fwnode, "ps-source-off-time-ms", &val);
7099 port->timings.ps_src_off_time = val;
7101 port->timings.ps_src_off_time = PD_T_PS_SOURCE_OFF;
7103 ret = fwnode_property_read_u32(fwnode, "cc-debounce-time-ms", &val);
7105 port->timings.cc_debounce_time = val;
7107 port->timings.cc_debounce_time = PD_T_CC_DEBOUNCE;
7109 ret = fwnode_property_read_u32(fwnode, "sink-bc12-completion-time-ms", &val);
7111 port->timings.snk_bc12_cmpletion_time = val;
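/*
 * These timings come from optional firmware properties on the connector node;
 * the first three fall back to the PD specification defaults
 * (PD_T_SINK_WAIT_CAP, PD_T_PS_SOURCE_OFF, PD_T_CC_DEBOUNCE) when absent. A
 * minimal, illustrative device-tree fragment (property values are examples
 * only):
 *
 *	connector {
 *		compatible = "usb-c-connector";
 *		sink-wait-cap-time-ms = <310>;
 *		ps-source-off-time-ms = <920>;
 *		cc-debounce-time-ms = <170>;
 *		sink-bc12-completion-time-ms = <500>;
 *	};
 */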
7114 static int tcpm_fw_get_caps(struct tcpm_port *port, struct fwnode_handle *fwnode)
7116 struct fwnode_handle *capabilities, *child, *caps = NULL;
7117 unsigned int nr_src_pdo, nr_snk_pdo;
7118 const char *opmode_str;
7119 u32 *src_pdo, *snk_pdo;
7120 u32 uw, frs_current;
7128 * This fwnode has a "compatible" property, but is never populated as a
7129 * struct device. Instead we simply parse it to read the properties.
7130	 * This breaks fw_devlink=on. To maintain backward compatibility
7131 * with existing DT files, we work around this by deleting any
7132 * fwnode_links to/from this fwnode.
7134 fw_devlink_purge_absent_suppliers(fwnode);
7136 ret = typec_get_fw_cap(&port->typec_caps, fwnode);
7142 if (fwnode_property_read_bool(fwnode, "accessory-mode-audio"))
7143 port->typec_caps.accessory[mode++] = TYPEC_ACCESSORY_AUDIO;
7145 if (fwnode_property_read_bool(fwnode, "accessory-mode-debug"))
7146 port->typec_caps.accessory[mode++] = TYPEC_ACCESSORY_DEBUG;
7148 port->port_type = port->typec_caps.type;
7149 port->pd_supported = !fwnode_property_read_bool(fwnode, "pd-disable");
7150 port->slow_charger_loop = fwnode_property_read_bool(fwnode, "slow-charger-loop");
7151 port->self_powered = fwnode_property_read_bool(fwnode, "self-powered");
7153 if (!port->pd_supported) {
7154 ret = fwnode_property_read_string(fwnode, "typec-power-opmode", &opmode_str);
7157 ret = typec_find_pwr_opmode(opmode_str);
7160 port->src_rp = tcpm_pwr_opmode_to_rp(ret);
7164	/* The following code is applicable to PD-capable ports, i.e. pd_supported is true. */
7166 /* FRS can only be supported by DRP ports */
7167 if (port->port_type == TYPEC_PORT_DRP) {
7168 ret = fwnode_property_read_u32(fwnode, "new-source-frs-typec-current",
7170 if (!ret && frs_current <= FRS_5V_3A)
7171 port->new_source_frs_current = frs_current;
7177	/* For backward compatibility, the "capabilities" node is optional. */
7178 capabilities = fwnode_get_named_child_node(fwnode, "capabilities");
7179 if (!capabilities) {
7182 fwnode_for_each_child_node(capabilities, child)
7185 if (!port->pd_count) {
7187 goto put_capabilities;
7191 port->pds = devm_kcalloc(port->dev, port->pd_count, sizeof(struct usb_power_delivery *),
7195 goto put_capabilities;
7198 port->pd_list = devm_kcalloc(port->dev, port->pd_count, sizeof(struct pd_data *),
7200 if (!port->pd_list) {
7202 goto put_capabilities;
7205 for (i = 0; i < port->pd_count; i++) {
7206 port->pd_list[i] = devm_kzalloc(port->dev, sizeof(struct pd_data), GFP_KERNEL);
7207 if (!port->pd_list[i]) {
7209 goto put_capabilities;
7212 src_pdo = port->pd_list[i]->source_desc.pdo;
7213 port->pd_list[i]->source_desc.role = TYPEC_SOURCE;
7214 snk_pdo = port->pd_list[i]->sink_desc.pdo;
7215 port->pd_list[i]->sink_desc.role = TYPEC_SINK;
7217 /* If "capabilities" is NULL, fall back to single pd cap population. */
7221 caps = fwnode_get_next_child_node(capabilities, caps);
7223 if (port->port_type != TYPEC_PORT_SNK) {
7224 ret = fwnode_property_count_u32(caps, "source-pdos");
7232 nr_src_pdo = min(ret, PDO_MAX_OBJECTS);
7233 ret = fwnode_property_read_u32_array(caps, "source-pdos", src_pdo,
7238 ret = tcpm_validate_caps(port, src_pdo, nr_src_pdo);
7243 port->nr_src_pdo = nr_src_pdo;
7244 memcpy_and_pad(port->src_pdo, sizeof(u32) * PDO_MAX_OBJECTS,
7245 port->pd_list[0]->source_desc.pdo,
7246 sizeof(u32) * nr_src_pdo,
7251 if (port->port_type != TYPEC_PORT_SRC) {
7252 ret = fwnode_property_count_u32(caps, "sink-pdos");
7261 nr_snk_pdo = min(ret, PDO_MAX_OBJECTS);
7262 ret = fwnode_property_read_u32_array(caps, "sink-pdos", snk_pdo,
7267 ret = tcpm_validate_caps(port, snk_pdo, nr_snk_pdo);
7271 if (fwnode_property_read_u32(caps, "op-sink-microwatt", &uw) < 0) {
7276 port->pd_list[i]->operating_snk_mw = uw / 1000;
7279 port->nr_snk_pdo = nr_snk_pdo;
7280 memcpy_and_pad(port->snk_pdo, sizeof(u32) * PDO_MAX_OBJECTS,
7281 port->pd_list[0]->sink_desc.pdo,
7282 sizeof(u32) * nr_snk_pdo,
7284 port->operating_snk_mw = port->pd_list[0]->operating_snk_mw;
7291 fwnode_handle_put(caps);
7293 fwnode_handle_put(capabilities);
7297 static int tcpm_fw_get_snk_vdos(struct tcpm_port *port, struct fwnode_handle *fwnode)
7301 /* sink-vdos is optional */
7302 ret = fwnode_property_count_u32(fwnode, "sink-vdos");
7306 port->nr_snk_vdo = min(ret, VDO_MAX_OBJECTS);
7307 if (port->nr_snk_vdo) {
7308 ret = fwnode_property_read_u32_array(fwnode, "sink-vdos",
7315 /* If sink-vdos is found, sink-vdos-v1 is expected for backward compatibility. */
7316 if (port->nr_snk_vdo) {
7317 ret = fwnode_property_count_u32(fwnode, "sink-vdos-v1");
7323 port->nr_snk_vdo_v1 = min(ret, VDO_MAX_OBJECTS);
7324 ret = fwnode_property_read_u32_array(fwnode, "sink-vdos-v1",
7326 port->nr_snk_vdo_v1);
7334 /* Power Supply access to expose source power information */
7335 enum tcpm_psy_online_states {
7336 TCPM_PSY_OFFLINE = 0,
7337 TCPM_PSY_FIXED_ONLINE,
7338 TCPM_PSY_PROG_ONLINE,
7341 static enum power_supply_property tcpm_psy_props[] = {
7342 POWER_SUPPLY_PROP_USB_TYPE,
7343 POWER_SUPPLY_PROP_ONLINE,
7344 POWER_SUPPLY_PROP_VOLTAGE_MIN,
7345 POWER_SUPPLY_PROP_VOLTAGE_MAX,
7346 POWER_SUPPLY_PROP_VOLTAGE_NOW,
7347 POWER_SUPPLY_PROP_CURRENT_MAX,
7348 POWER_SUPPLY_PROP_CURRENT_NOW,
7351 static int tcpm_psy_get_online(struct tcpm_port *port,
7352 union power_supply_propval *val)
7354 if (port->vbus_charge) {
7355 if (port->pps_data.active)
7356 val->intval = TCPM_PSY_PROG_ONLINE;
7358 val->intval = TCPM_PSY_FIXED_ONLINE;
7360 val->intval = TCPM_PSY_OFFLINE;
7366 static int tcpm_psy_get_voltage_min(struct tcpm_port *port,
7367 union power_supply_propval *val)
7369 if (port->pps_data.active)
7370 val->intval = port->pps_data.min_volt * 1000;
7372 val->intval = port->supply_voltage * 1000;
7377 static int tcpm_psy_get_voltage_max(struct tcpm_port *port,
7378 union power_supply_propval *val)
7380 if (port->pps_data.active)
7381 val->intval = port->pps_data.max_volt * 1000;
7383 val->intval = port->supply_voltage * 1000;
7388 static int tcpm_psy_get_voltage_now(struct tcpm_port *port,
7389 union power_supply_propval *val)
7391 val->intval = port->supply_voltage * 1000;
7396 static int tcpm_psy_get_current_max(struct tcpm_port *port,
7397 union power_supply_propval *val)
7399 if (port->pps_data.active)
7400 val->intval = port->pps_data.max_curr * 1000;
7402 val->intval = port->current_limit * 1000;
7407 static int tcpm_psy_get_current_now(struct tcpm_port *port,
7408 union power_supply_propval *val)
7410 val->intval = port->current_limit * 1000;
7415 static int tcpm_psy_get_input_power_limit(struct tcpm_port *port,
7416 union power_supply_propval *val)
7418 unsigned int src_mv, src_ma, max_src_uw = 0;
7419 unsigned int i, tmp;
7421 for (i = 0; i < port->nr_source_caps; i++) {
7422 u32 pdo = port->source_caps[i];
7424 if (pdo_type(pdo) == PDO_TYPE_FIXED) {
7425 src_mv = pdo_fixed_voltage(pdo);
7426 src_ma = pdo_max_current(pdo);
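			/* mV * mA = uW, so max_src_uw accumulates microwatts */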
7427 tmp = src_mv * src_ma;
7428 max_src_uw = max(tmp, max_src_uw);
7432 val->intval = max_src_uw;
7436 static int tcpm_psy_get_prop(struct power_supply *psy,
7437 enum power_supply_property psp,
7438 union power_supply_propval *val)
7440 struct tcpm_port *port = power_supply_get_drvdata(psy);
7444 case POWER_SUPPLY_PROP_USB_TYPE:
7445 val->intval = port->usb_type;
7447 case POWER_SUPPLY_PROP_ONLINE:
7448 ret = tcpm_psy_get_online(port, val);
7450 case POWER_SUPPLY_PROP_VOLTAGE_MIN:
7451 ret = tcpm_psy_get_voltage_min(port, val);
7453 case POWER_SUPPLY_PROP_VOLTAGE_MAX:
7454 ret = tcpm_psy_get_voltage_max(port, val);
7456 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
7457 ret = tcpm_psy_get_voltage_now(port, val);
7459 case POWER_SUPPLY_PROP_CURRENT_MAX:
7460 ret = tcpm_psy_get_current_max(port, val);
7462 case POWER_SUPPLY_PROP_CURRENT_NOW:
7463 ret = tcpm_psy_get_current_now(port, val);
7465 case POWER_SUPPLY_PROP_INPUT_POWER_LIMIT:
7466 tcpm_psy_get_input_power_limit(port, val);
7476 static int tcpm_psy_set_online(struct tcpm_port *port,
7477 const union power_supply_propval *val)
7481 switch (val->intval) {
7482 case TCPM_PSY_FIXED_ONLINE:
7483 ret = tcpm_pps_activate(port, false);
7485 case TCPM_PSY_PROG_ONLINE:
7486 ret = tcpm_pps_activate(port, true);
7496 static int tcpm_psy_set_prop(struct power_supply *psy,
7497 enum power_supply_property psp,
7498 const union power_supply_propval *val)
7500 struct tcpm_port *port = power_supply_get_drvdata(psy);
7504 * All the properties below are related to USB PD. The check needs to be
7505	 * property-specific when a non-PD related property is added.
7507 if (!port->pd_supported)
7511 case POWER_SUPPLY_PROP_ONLINE:
7512 ret = tcpm_psy_set_online(port, val);
7514 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
7515 ret = tcpm_pps_set_out_volt(port, val->intval / 1000);
7517 case POWER_SUPPLY_PROP_CURRENT_NOW:
7518 if (val->intval > port->pps_data.max_curr * 1000)
7521 ret = tcpm_pps_set_op_curr(port, val->intval / 1000);
7527 power_supply_changed(port->psy);
7531 static int tcpm_psy_prop_writeable(struct power_supply *psy,
7532 enum power_supply_property psp)
7535 case POWER_SUPPLY_PROP_ONLINE:
7536 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
7537 case POWER_SUPPLY_PROP_CURRENT_NOW:
7544 static const char *tcpm_psy_name_prefix = "tcpm-source-psy-";
7546 static int devm_tcpm_psy_register(struct tcpm_port *port)
7548 struct power_supply_config psy_cfg = {};
7549 const char *port_dev_name = dev_name(port->dev);
7550 size_t psy_name_len = strlen(tcpm_psy_name_prefix) +
7551 strlen(port_dev_name) + 1;
7554 psy_cfg.drv_data = port;
7555 psy_cfg.fwnode = dev_fwnode(port->dev);
7556 psy_name = devm_kzalloc(port->dev, psy_name_len, GFP_KERNEL);
7560 snprintf(psy_name, psy_name_len, "%s%s", tcpm_psy_name_prefix,
7562 port->psy_desc.name = psy_name;
7563 port->psy_desc.type = POWER_SUPPLY_TYPE_USB;
7564 port->psy_desc.usb_types = BIT(POWER_SUPPLY_USB_TYPE_C) |
7565 BIT(POWER_SUPPLY_USB_TYPE_PD) |
7566 BIT(POWER_SUPPLY_USB_TYPE_PD_PPS);
7567 port->psy_desc.properties = tcpm_psy_props;
7568 port->psy_desc.num_properties = ARRAY_SIZE(tcpm_psy_props);
7569 port->psy_desc.get_property = tcpm_psy_get_prop;
7570 port->psy_desc.set_property = tcpm_psy_set_prop;
7571 port->psy_desc.property_is_writeable = tcpm_psy_prop_writeable;
7573 port->usb_type = POWER_SUPPLY_USB_TYPE_C;
7575 port->psy = devm_power_supply_register(port->dev, &port->psy_desc,
7578 return PTR_ERR_OR_ZERO(port->psy);
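/*
 * The resulting supply shows up as "tcpm-source-psy-<dev name>" under
 * /sys/class/power_supply/ (for example "tcpm-source-psy-2-0050" for an I2C
 * TCPC at address 0x50 on bus 2; the suffix is simply the parent device name).
 * Standard power-supply units apply, so voltage_now/current_now are read and
 * written in microvolts/microamps; tcpm_psy_set_prop() above divides by 1000
 * to obtain the millivolt/milliamp values the PPS helpers expect.
 */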
7581 static enum hrtimer_restart state_machine_timer_handler(struct hrtimer *timer)
7583 struct tcpm_port *port = container_of(timer, struct tcpm_port, state_machine_timer);
7585 if (port->registered)
7586 kthread_queue_work(port->wq, &port->state_machine);
7587 return HRTIMER_NORESTART;
7590 static enum hrtimer_restart vdm_state_machine_timer_handler(struct hrtimer *timer)
7592 struct tcpm_port *port = container_of(timer, struct tcpm_port, vdm_state_machine_timer);
7594 if (port->registered)
7595 kthread_queue_work(port->wq, &port->vdm_state_machine);
7596 return HRTIMER_NORESTART;
7599 static enum hrtimer_restart enable_frs_timer_handler(struct hrtimer *timer)
7601 struct tcpm_port *port = container_of(timer, struct tcpm_port, enable_frs_timer);
7603 if (port->registered)
7604 kthread_queue_work(port->wq, &port->enable_frs);
7605 return HRTIMER_NORESTART;
7608 static enum hrtimer_restart send_discover_timer_handler(struct hrtimer *timer)
7610 struct tcpm_port *port = container_of(timer, struct tcpm_port, send_discover_timer);
7612 if (port->registered)
7613 kthread_queue_work(port->wq, &port->send_discover_work);
7614 return HRTIMER_NORESTART;
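/*
 * All four timer handlers above only queue work while port->registered is
 * set. tcpm_unregister_port() clears that flag and destroys the worker before
 * cancelling the timers, so a timer that fires during teardown cannot queue
 * work on a dead kthread worker.
 */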
7617 struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
7619 struct tcpm_port *port;
7622 if (!dev || !tcpc ||
7623 !tcpc->get_vbus || !tcpc->set_cc || !tcpc->get_cc ||
7624 !tcpc->set_polarity || !tcpc->set_vconn || !tcpc->set_vbus ||
7625 !tcpc->set_pd_rx || !tcpc->set_roles || !tcpc->pd_transmit)
7626 return ERR_PTR(-EINVAL);
7628 port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
7630 return ERR_PTR(-ENOMEM);
7635 mutex_init(&port->lock);
7636 mutex_init(&port->swap_lock);
7638 port->wq = kthread_create_worker(0, dev_name(dev));
7639 if (IS_ERR(port->wq))
7640 return ERR_CAST(port->wq);
7641 sched_set_fifo(port->wq->task);
7643 kthread_init_work(&port->state_machine, tcpm_state_machine_work);
7644 kthread_init_work(&port->vdm_state_machine, vdm_state_machine_work);
7645 kthread_init_work(&port->event_work, tcpm_pd_event_handler);
7646 kthread_init_work(&port->enable_frs, tcpm_enable_frs_work);
7647 kthread_init_work(&port->send_discover_work, tcpm_send_discover_work);
7648 hrtimer_init(&port->state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7649 port->state_machine_timer.function = state_machine_timer_handler;
7650 hrtimer_init(&port->vdm_state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7651 port->vdm_state_machine_timer.function = vdm_state_machine_timer_handler;
7652 hrtimer_init(&port->enable_frs_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7653 port->enable_frs_timer.function = enable_frs_timer_handler;
7654 hrtimer_init(&port->send_discover_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7655 port->send_discover_timer.function = send_discover_timer_handler;
7657 spin_lock_init(&port->pd_event_lock);
7659 init_completion(&port->tx_complete);
7660 init_completion(&port->swap_complete);
7661 init_completion(&port->pps_complete);
7662 tcpm_debugfs_init(port);
7664 err = tcpm_fw_get_caps(port, tcpc->fwnode);
7666 goto out_destroy_wq;
7667 err = tcpm_fw_get_snk_vdos(port, tcpc->fwnode);
7669 goto out_destroy_wq;
7671 tcpm_fw_get_timings(port, tcpc->fwnode);
7673 port->try_role = port->typec_caps.prefer_role;
7675 port->typec_caps.revision = 0x0120; /* Type-C spec release 1.2 */
7676 port->typec_caps.pd_revision = 0x0300; /* USB-PD spec release 3.0 */
7677 port->typec_caps.svdm_version = SVDM_VER_2_0;
7678 port->typec_caps.driver_data = port;
7679 port->typec_caps.ops = &tcpm_ops;
7680 port->typec_caps.orientation_aware = 1;
7682 port->partner_desc.identity = &port->partner_ident;
7684 port->role_sw = usb_role_switch_get(port->dev);
7686 port->role_sw = fwnode_usb_role_switch_get(tcpc->fwnode);
7687 if (IS_ERR(port->role_sw)) {
7688 err = PTR_ERR(port->role_sw);
7689 goto out_destroy_wq;
7692 err = devm_tcpm_psy_register(port);
7694 goto out_role_sw_put;
7695 power_supply_changed(port->psy);
7697 err = tcpm_port_register_pd(port);
7699 goto out_role_sw_put;
7702 port->typec_caps.pd = port->pds[0];
7704 port->typec_port = typec_register_port(port->dev, &port->typec_caps);
7705 if (IS_ERR(port->typec_port)) {
7706 err = PTR_ERR(port->typec_port);
7707 goto out_unregister_pd;
7710 typec_port_register_altmodes(port->typec_port,
7711 &tcpm_altmode_ops, port,
7712 port->port_altmode, ALTMODE_DISCOVERY_MAX);
7713 typec_port_register_cable_ops(port->port_altmode, ARRAY_SIZE(port->port_altmode),
7715 port->registered = true;
7717 mutex_lock(&port->lock);
7719 mutex_unlock(&port->lock);
7721 tcpm_log(port, "%s: registered", dev_name(dev));
7725 tcpm_port_unregister_pd(port);
7727 usb_role_switch_put(port->role_sw);
7729 tcpm_debugfs_exit(port);
7730 kthread_destroy_worker(port->wq);
7731 return ERR_PTR(err);
7733 EXPORT_SYMBOL_GPL(tcpm_register_port);
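/*
 * A minimal sketch of how a low-level TCPC driver ties into this interface.
 * All foo_* names and the I2C specifics are illustrative, not part of this
 * file; the callbacks assigned below are the ones the validity check at the
 * top of tcpm_register_port() requires, plus init(), which tcpm_init() calls
 * unconditionally. Events are then reported from the driver's interrupt
 * handler with helpers such as tcpm_cc_change(), tcpm_vbus_change() and
 * tcpm_pd_receive().
 *
 *	struct foo_tcpc {
 *		struct tcpc_dev tcpc;
 *		struct tcpm_port *port;
 *	};
 *
 *	static int foo_probe(struct i2c_client *client)
 *	{
 *		struct foo_tcpc *chip;
 *
 *		chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
 *		if (!chip)
 *			return -ENOMEM;
 *
 *		chip->tcpc.fwnode = device_get_named_child_node(&client->dev,
 *								"connector");
 *		chip->tcpc.init = foo_init;
 *		chip->tcpc.get_vbus = foo_get_vbus;
 *		chip->tcpc.set_cc = foo_set_cc;
 *		chip->tcpc.get_cc = foo_get_cc;
 *		chip->tcpc.set_polarity = foo_set_polarity;
 *		chip->tcpc.set_vconn = foo_set_vconn;
 *		chip->tcpc.set_vbus = foo_set_vbus;
 *		chip->tcpc.set_pd_rx = foo_set_pd_rx;
 *		chip->tcpc.set_roles = foo_set_roles;
 *		chip->tcpc.pd_transmit = foo_pd_transmit;
 *
 *		chip->port = tcpm_register_port(&client->dev, &chip->tcpc);
 *		if (IS_ERR(chip->port)) {
 *			fwnode_handle_put(chip->tcpc.fwnode);
 *			return PTR_ERR(chip->port);
 *		}
 *
 *		i2c_set_clientdata(client, chip);
 *		return 0;
 *	}
 *
 * The matching remove path calls tcpm_unregister_port(chip->port) and puts
 * chip->tcpc.fwnode.
 */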
7735 void tcpm_unregister_port(struct tcpm_port *port)
7739 port->registered = false;
7740 kthread_destroy_worker(port->wq);
7742 hrtimer_cancel(&port->send_discover_timer);
7743 hrtimer_cancel(&port->enable_frs_timer);
7744 hrtimer_cancel(&port->vdm_state_machine_timer);
7745 hrtimer_cancel(&port->state_machine_timer);
7747 tcpm_reset_port(port);
7749 tcpm_port_unregister_pd(port);
7751 for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
7752 typec_unregister_altmode(port->port_altmode[i]);
7753 typec_unregister_port(port->typec_port);
7754 usb_role_switch_put(port->role_sw);
7755 tcpm_debugfs_exit(port);
7757 EXPORT_SYMBOL_GPL(tcpm_unregister_port);
7760 MODULE_DESCRIPTION("USB Type-C Port Manager");
7761 MODULE_LICENSE("GPL");