device_enable_async_suspend(bus->bridge);
pci_set_bus_of_node(bus);
pci_set_bus_msi_domain(bus);
- if (bridge->msi_domain && !dev_get_msi_domain(&bus->dev))
+ if (bridge->msi_domain && !dev_get_msi_domain(&bus->dev) &&
+ !pci_host_of_has_msi_map(parent))
bus->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
if (!parent)
dev->untrusted = true;
}
+static void pci_set_removable(struct pci_dev *dev)
+{
+ struct pci_dev *parent = pci_upstream_bridge(dev);
+
+ /*
+ * We (only) consider everything downstream from an external_facing
+ * device to be removable by the user. We're mainly concerned with
+ * consumer platforms with user accessible thunderbolt ports that are
+ * vulnerable to DMA attacks, and we expect those ports to be marked by
+ * the firmware as external_facing. Devices in traditional hotplug
+ * slots can technically be removed, but the expectation is that unless
+ * the port is marked with external_facing, such devices are less
+ * accessible to the user / may not be removed by the end user, and thus
+ * are not exposed as "removable" to userspace.
+ */
+ if (parent &&
+ (parent->external_facing || dev_is_removable(&parent->dev)))
+ dev_set_removable(&dev->dev, DEVICE_REMOVABLE);
+}
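Because pci_set_removable() consults only the immediate upstream bridge, and devices are probed top-down, the DEVICE_REMOVABLE state cascades through every level below an external_facing port. A minimal sketch of the resulting invariant, using existing helpers (pci_upstream_bridge(), dev_is_removable()) but with a hypothetical function name:

/*
 * Illustrative sketch, not part of the patch: any device below an
 * external_facing port has some ancestor that is either marked
 * external_facing or already flagged removable, which is the property
 * pci_set_removable() establishes incrementally at probe time.
 */
static bool example_is_user_removable(struct pci_dev *pdev)
{
	struct pci_dev *parent;

	for (parent = pci_upstream_bridge(pdev); parent;
	     parent = pci_upstream_bridge(parent)) {
		if (parent->external_facing ||
		    dev_is_removable(&parent->dev))
			return true;
	}

	return false;
}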
+
/**
* pci_ext_cfg_is_aliased - Is ext config space just an alias of std config?
* @dev: PCI device
/* Early fixups, before probing the BARs */
pci_fixup_device(pci_fixup_early, dev);
+ pci_set_removable(dev);
+
pci_info(dev, "[%04x:%04x] type %02x class %#08x\n",
dev->vendor, dev->device, dev->hdr_type, dev->class);
}
/**
- * select_ep - selects endpoint
+ * cdns3_select_ep - selects endpoint
* @priv_dev: extended gadget object
* @ep: endpoint address
*/
}
/**
- * cdns3_device_irq_handler- interrupt handler for device part of controller
+ * cdns3_device_irq_handler - interrupt handler for device part of controller
*
* @irq: irq number for cdns3 core device
* @data: structure of cdns3
}
/**
- * cdns3_device_thread_irq_handler- interrupt handler for device part
+ * cdns3_device_thread_irq_handler - interrupt handler for device part
* of controller
*
* @irq: irq number for cdns3 core device
else
mask = BIT(priv_ep->num);
- if (priv_ep->type != USB_ENDPOINT_XFER_ISOC) {
+ if (priv_ep->type != USB_ENDPOINT_XFER_ISOC && !priv_ep->dir) {
cdns3_set_register_bit(&regs->tdl_from_trb, mask);
cdns3_set_register_bit(&regs->tdl_beh, mask);
cdns3_set_register_bit(&regs->tdl_beh2, mask);
}
/**
- * cdns3_ep_config Configure hardware endpoint
+ * cdns3_ep_config - Configure hardware endpoint
* @priv_ep: extended endpoint object
* @enable: set EP_CFG_ENABLE bit in ep_cfg register.
*/
case USB_ENDPOINT_XFER_INT:
ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_INT);
- if ((priv_dev->dev_ver == DEV_VER_V2 && !priv_ep->dir) ||
- priv_dev->dev_ver > DEV_VER_V2)
+ if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir)
ep_cfg |= EP_CFG_TDL_CHK;
break;
case USB_ENDPOINT_XFER_BULK:
ep_cfg = EP_CFG_EPTYPE(USB_ENDPOINT_XFER_BULK);
- if ((priv_dev->dev_ver == DEV_VER_V2 && !priv_ep->dir) ||
- priv_dev->dev_ver > DEV_VER_V2)
+ if (priv_dev->dev_ver >= DEV_VER_V2 && !priv_ep->dir)
ep_cfg |= EP_CFG_TDL_CHK;
break;
default:
}
/**
- * cdns3_gadget_ep_alloc_request Allocates request
+ * cdns3_gadget_ep_alloc_request - Allocates request
* @ep: endpoint object associated with request
* @gfp_flags: gfp flags
*
}
/**
- * cdns3_gadget_ep_free_request Free memory occupied by request
+ * cdns3_gadget_ep_free_request - Free memory occupied by request
* @ep: endpoint object associated with request
* @request: request to free memory
*/
}
/**
- * cdns3_gadget_ep_enable Enable endpoint
+ * cdns3_gadget_ep_enable - Enable endpoint
* @ep: endpoint object
* @desc: endpoint descriptor
*
}
/**
- * cdns3_gadget_ep_disable Disable endpoint
+ * cdns3_gadget_ep_disable - Disable endpoint
* @ep: endpoint object
*
* Returns 0 on success, error code elsewhere
}
/**
- * cdns3_gadget_ep_queue Transfer data on endpoint
+ * __cdns3_gadget_ep_queue - Transfer data on endpoint
* @ep: endpoint object
* @request: request object
* @gfp_flags: gfp flags
}
/**
- * cdns3_gadget_ep_dequeue Remove request from transfer queue
+ * cdns3_gadget_ep_dequeue - Remove request from transfer queue
* @ep: endpoint object associated with request
* @request: request object
*
}
/**
- * __cdns3_gadget_ep_set_halt Sets stall on selected endpoint
+ * __cdns3_gadget_ep_set_halt - Sets stall on selected endpoint
* Should be called after acquiring spin_lock and selecting ep
* @priv_ep: endpoint object to set stall on.
*/
}
/**
- * __cdns3_gadget_ep_clear_halt Clears stall on selected endpoint
+ * __cdns3_gadget_ep_clear_halt - Clears stall on selected endpoint
* Should be called after acquiring spin_lock and selecting ep
* @priv_ep: endpoint object to clear stall on
*/
}
/**
- * cdns3_gadget_ep_set_halt Sets/clears stall on selected endpoint
+ * cdns3_gadget_ep_set_halt - Sets/clears stall on selected endpoint
* @ep: endpoint object to set/clear stall on
* @value: 1 for set stall, 0 for clear stall
*
};
/**
- * cdns3_gadget_get_frame Returns number of actual ITP frame
+ * cdns3_gadget_get_frame - Returns number of actual ITP frame
* @gadget: gadget object
*
* Returns number of actual ITP frame
}
/**
- * cdns3_gadget_udc_start Gadget start
+ * cdns3_gadget_udc_start - Gadget start
* @gadget: gadget object
* @driver: driver which operates on this gadget
*
}
/**
- * cdns3_gadget_udc_stop Stops gadget
+ * cdns3_gadget_udc_stop - Stops gadget
* @gadget: gadget object
*
* Returns 0
}
/**
- * cdns3_init_eps Initializes software endpoints of gadget
+ * cdns3_init_eps - Initializes software endpoints of gadget
* @priv_dev: extended gadget object
*
* Returns 0 on success, error code elsewhere
dwc3_get_properties(dwc);
+ ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
+ if (ret)
+ return ret;
+
dwc->reset = devm_reset_control_array_get_optional_shared(dev);
if (IS_ERR(dwc->reset))
return PTR_ERR(dwc->reset);
pm_runtime_get_sync(&pdev->dev);
- dwc3_debugfs_exit(dwc);
dwc3_core_exit_mode(dwc);
+ dwc3_debugfs_exit(dwc);
dwc3_core_exit(dwc);
dwc3_ulpi_exit(dwc);
return 0;
}
- static void dwc3_shutdown(struct platform_device *pdev)
- {
- dwc3_remove(pdev);
- }
-
#ifdef CONFIG_PM
static int dwc3_core_init_for_resume(struct dwc3 *dwc)
{
static struct platform_driver dwc3_driver = {
.probe = dwc3_probe,
.remove = dwc3_remove,
- .shutdown = dwc3_shutdown,
.driver = {
.name = "dwc3",
.of_match_table = of_match_ptr(of_dwc3_match),
}
/*
- * Synchronize any pending event handling before executing the controller
- * halt routine.
+ * Synchronize and disable any further event handling while controller
+ * is being enabled/disabled.
*/
- if (!is_on) {
- dwc3_gadget_disable_irq(dwc);
- synchronize_irq(dwc->irq_gadget);
- }
+ disable_irq(dwc->irq_gadget);
spin_lock_irqsave(&dwc->lock, flags);
ret = dwc3_gadget_run_stop(dwc, is_on, false);
spin_unlock_irqrestore(&dwc->lock, flags);
+ enable_irq(dwc->irq_gadget);
+
pm_runtime_put(dwc->dev);
return ret;
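The move from the conditional dwc3_gadget_disable_irq() + synchronize_irq() pair to a plain disable_irq()/enable_irq() bracket works because disable_irq() both masks the line and waits for any handler already running, and the new code applies that to the enable path as well as the disable path. A rough sketch of the equivalence (standard kernel/irq APIs; the wrapper name is made up):

/* Sketch only: what disable_irq() amounts to for this purpose. */
static void example_quiesce_irq(unsigned int irq)
{
	disable_irq_nosync(irq);	/* mask the interrupt line */
	synchronize_irq(irq);		/* wait for in-flight handlers */

	/* ... run_stop() under the spinlock ... */

	enable_irq(irq);
}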
list_del(&dep->endpoint.ep_list);
}
- debugfs_remove_recursive(debugfs_lookup(dep->name, dwc->root));
+ debugfs_remove_recursive(debugfs_lookup(dep->name,
+ debugfs_lookup(dev_name(dep->dwc->dev),
+ usb_debug_root)));
kfree(dep);
}
}
dwc3_gadget_free_endpoints(dwc);
err4:
usb_put_gadget(dwc->gadget);
+ dwc->gadget = NULL;
err3:
dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
dwc->bounce_addr);
void dwc3_gadget_exit(struct dwc3 *dwc)
{
+ if (!dwc->gadget)
+ return;
+
usb_del_gadget(dwc->gadget);
dwc3_gadget_free_endpoints(dwc);
usb_put_gadget(dwc->gadget);
static struct ffs_dev *_ffs_find_dev(const char *name);
static struct ffs_dev *_ffs_alloc_dev(void);
static void _ffs_free_dev(struct ffs_dev *dev);
-static void *ffs_acquire_dev(const char *dev_name);
-static void ffs_release_dev(struct ffs_data *ffs_data);
+static int ffs_acquire_dev(const char *dev_name, struct ffs_data *ffs_data);
+static void ffs_release_dev(struct ffs_dev *ffs_dev);
static int ffs_ready(struct ffs_data *ffs);
static void ffs_closed(struct ffs_data *ffs);
static int ffs_fs_get_tree(struct fs_context *fc)
{
struct ffs_sb_fill_data *ctx = fc->fs_private;
- void *ffs_dev;
struct ffs_data *ffs;
+ int ret;
ENTER();
return -ENOMEM;
}
- ffs_dev = ffs_acquire_dev(ffs->dev_name);
- if (IS_ERR(ffs_dev)) {
+ ret = ffs_acquire_dev(ffs->dev_name, ffs);
+ if (ret) {
ffs_data_put(ffs);
- return PTR_ERR(ffs_dev);
+ return ret;
}
- ffs->private_data = ffs_dev;
ctx->ffs_data = ffs;
return get_tree_nodev(fc, ffs_sb_fill);
}
if (ctx) {
if (ctx->ffs_data) {
- ffs_release_dev(ctx->ffs_data);
ffs_data_put(ctx->ffs_data);
}
ENTER();
kill_litter_super(sb);
- if (sb->s_fs_info) {
- ffs_release_dev(sb->s_fs_info);
+ if (sb->s_fs_info)
ffs_data_closed(sb->s_fs_info);
- }
}
static struct file_system_type ffs_fs_type = {
if (refcount_dec_and_test(&ffs->ref)) {
pr_info("%s(): freeing\n", __func__);
ffs_data_clear(ffs);
+ ffs_release_dev(ffs->private_data);
BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
swait_active(&ffs->ep0req_completion.wait) ||
waitqueue_active(&ffs->wait));
struct ffs_function *func = ffs_func_from_usb(f);
struct f_fs_opts *ffs_opts =
container_of(f->fi, struct f_fs_opts, func_inst);
+ struct ffs_data *ffs_data;
int ret;
ENTER();
if (!ffs_opts->no_configfs)
ffs_dev_lock();
ret = ffs_opts->dev->desc_ready ? 0 : -ENODEV;
- func->ffs = ffs_opts->dev->ffs_data;
+ ffs_data = ffs_opts->dev->ffs_data;
if (!ffs_opts->no_configfs)
ffs_dev_unlock();
if (ret)
return ERR_PTR(ret);
+ func->ffs = ffs_data;
func->conf = c;
func->gadget = c->cdev->gadget;
struct f_fs_opts *opts;
opts = to_f_fs_opts(f);
+ ffs_release_dev(opts->dev);
ffs_dev_lock();
_ffs_free_dev(opts->dev);
ffs_dev_unlock();
ffs->func = NULL;
}
+ /* Drain any pending AIO completions */
+ drain_workqueue(ffs->io_completion_wq);
+
if (!--opts->refcnt)
functionfs_unbind(ffs);
{
list_del(&dev->entry);
- /* Clear the private_data pointer to stop incorrect dev access */
- if (dev->ffs_data)
- dev->ffs_data->private_data = NULL;
-
kfree(dev);
if (list_empty(&ffs_devices))
functionfs_cleanup();
}
-static void *ffs_acquire_dev(const char *dev_name)
+static int ffs_acquire_dev(const char *dev_name, struct ffs_data *ffs_data)
{
+ int ret = 0;
struct ffs_dev *ffs_dev;
ENTER();
ffs_dev_lock();
ffs_dev = _ffs_find_dev(dev_name);
- if (!ffs_dev)
- ffs_dev = ERR_PTR(-ENOENT);
- else if (ffs_dev->mounted)
- ffs_dev = ERR_PTR(-EBUSY);
- else if (ffs_dev->ffs_acquire_dev_callback &&
- ffs_dev->ffs_acquire_dev_callback(ffs_dev))
- ffs_dev = ERR_PTR(-ENOENT);
- else
+ if (!ffs_dev) {
+ ret = -ENOENT;
+ } else if (ffs_dev->mounted) {
+ ret = -EBUSY;
+ } else if (ffs_dev->ffs_acquire_dev_callback &&
+ ffs_dev->ffs_acquire_dev_callback(ffs_dev)) {
+ ret = -ENOENT;
+ } else {
ffs_dev->mounted = true;
+ ffs_dev->ffs_data = ffs_data;
+ ffs_data->private_data = ffs_dev;
+ }
ffs_dev_unlock();
- return ffs_dev;
+ return ret;
}
-static void ffs_release_dev(struct ffs_data *ffs_data)
+static void ffs_release_dev(struct ffs_dev *ffs_dev)
{
- struct ffs_dev *ffs_dev;
-
ENTER();
ffs_dev_lock();
- ffs_dev = ffs_data->private_data;
- if (ffs_dev) {
+ if (ffs_dev && ffs_dev->mounted) {
ffs_dev->mounted = false;
+ if (ffs_dev->ffs_data) {
+ ffs_dev->ffs_data->private_data = NULL;
+ ffs_dev->ffs_data = NULL;
+ }
if (ffs_dev->ffs_release_dev_callback)
ffs_dev->ffs_release_dev_callback(ffs_dev);
}
ffs_obj->desc_ready = true;
- ffs_obj->ffs_data = ffs;
if (ffs_obj->ffs_ready_callback) {
ret = ffs_obj->ffs_ready_callback(ffs);
goto done;
ffs_obj->desc_ready = false;
- ffs_obj->ffs_data = NULL;
if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) &&
ffs_obj->ffs_closed_callback)
hidg_fs_out_ep_desc.bEndpointAddress;
status = usb_assign_descriptors(f, hidg_fs_descriptors,
- hidg_hs_descriptors, hidg_ss_descriptors, NULL);
+ hidg_hs_descriptors, hidg_ss_descriptors,
+ hidg_ss_descriptors);
if (status)
goto fail;
hidg->func.setup = hidg_setup;
hidg->func.free_func = hidg_free;
- /* this could me made configurable at some point */
+ /* this could be made configurable at some point */
hidg->qlen = 4;
return &hidg->func;
value = usb_ep_queue(dev->in_ep, req, GFP_ATOMIC);
spin_lock(&dev->lock);
if (value) {
- list_del(&req->list);
- list_add(&req->list, &dev->tx_reqs);
+ list_move(&req->list, &dev->tx_reqs);
spin_unlock_irqrestore(&dev->lock, flags);
mutex_unlock(&dev->lock_printer_io);
return -EAGAIN;
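The list_move() conversion above is purely mechanical: list_move() from include/linux/list.h is the same delete-then-add pair collapsed into one helper, so the request still ends up back on dev->tx_reqs under the same lock. For reference, a sketch of what it expands to (hypothetical name; the real helper uses __list_del_entry()):

static inline void example_list_move(struct list_head *entry,
				     struct list_head *head)
{
	list_del(entry);
	list_add(entry, head);
}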
ss_ep_out_desc.bEndpointAddress = fs_ep_out_desc.bEndpointAddress;
ret = usb_assign_descriptors(f, fs_printer_function,
- hs_printer_function, ss_printer_function, NULL);
+ hs_printer_function, ss_printer_function,
+ ss_printer_function);
if (ret)
return ret;
schedule_delayed_work(&musb->irq_work,
msecs_to_jiffies(1000));
musb->quirk_retries--;
- break;
}
- fallthrough;
+ break;
case MUSB_QUIRK_B_INVALID_VBUS_91:
if (musb->quirk_retries && !musb->flush_irq_work) {
musb_dbg(musb,
dev_err(musb->controller, "Could not enable: %i\n",
error);
musb->quirk_retries = 3;
+
+ /*
+ * We can get a spurious MUSB_INTR_SESSREQ interrupt on start-up
+ * in B-peripheral mode with nothing connected and the session
+ * bit clears silently. Check status again in 3 seconds.
+ */
+ if (devctl & MUSB_DEVCTL_BDEVICE)
+ schedule_delayed_work(&musb->irq_work,
+ msecs_to_jiffies(3000));
} else {
musb_dbg(musb, "Allow PM with no session: %02x", devctl);
pm_runtime_mark_last_busy(musb->controller);
#include "class.h"
#include "mux.h"
-static bool dev_name_ends_with(struct device *dev, const char *suffix)
-{
- const char *name = dev_name(dev);
- const int name_len = strlen(name);
- const int suffix_len = strlen(suffix);
-
- if (suffix_len > name_len)
- return false;
-
- return strcmp(name + (name_len - suffix_len), suffix) == 0;
-}
-
static int switch_fwnode_match(struct device *dev, const void *fwnode)
{
- return dev_fwnode(dev) == fwnode && dev_name_ends_with(dev, "-switch");
+ if (!is_typec_switch(dev))
+ return 0;
+
+ return dev_fwnode(dev) == fwnode;
}
static void *typec_switch_match(struct fwnode_handle *fwnode, const char *id,
{
struct device *dev;
+ /*
+ * Device graph (OF graph) does not give any means to identify the
+ * device type or the device class of the remote port parent that @fwnode
+ * represents, so in order to identify the type or the class of @fwnode
+ * an additional device property is needed. With typec switches the
+ * property is named "orientation-switch" (@id). The value of the device
+ * property is ignored.
+ */
if (id && !fwnode_property_present(fwnode, id))
return NULL;
+ /*
+ * At this point we are sure that @fwnode is a typec switch in all
+ * cases. If the switch hasn't yet been registered for some reason, the
+ * function "defers probe" for now.
+ */
dev = class_find_device(&typec_mux_class, NULL, fwnode,
switch_fwnode_match);
kfree(to_typec_switch(dev));
}
-static const struct device_type typec_switch_dev_type = {
+const struct device_type typec_switch_dev_type = {
.name = "orientation_switch",
.release = typec_switch_release,
};
static int mux_fwnode_match(struct device *dev, const void *fwnode)
{
- return dev_fwnode(dev) == fwnode && dev_name_ends_with(dev, "-mux");
+ if (!is_typec_mux(dev))
+ return 0;
+
+ return dev_fwnode(dev) == fwnode;
}
static void *typec_mux_match(struct fwnode_handle *fwnode, const char *id,
dev = class_find_device(&typec_mux_class, NULL, fwnode,
mux_fwnode_match);
- return dev ? to_typec_switch(dev) : ERR_PTR(-EPROBE_DEFER);
+ return dev ? to_typec_mux(dev) : ERR_PTR(-EPROBE_DEFER);
}
/**
kfree(to_typec_mux(dev));
}
-static const struct device_type typec_mux_dev_type = {
+const struct device_type typec_mux_dev_type = {
.name = "mode_switch",
.release = typec_mux_release,
};
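The reworked match functions depend on is_typec_switch() and is_typec_mux(), which are not part of this excerpt; exporting typec_switch_dev_type and typec_mux_dev_type is what makes a device_type based check possible. The assumed shape of those helpers (the real definitions live in the driver's class.h and may differ) would be:

/* Assumed helpers, shown for illustration only. */
#define is_typec_switch(dev)	((dev)->type == &typec_switch_dev_type)
#define is_typec_mux(dev)	((dev)->type == &typec_mux_dev_type)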
/*
* Input Output Manager (IOM) PORT STATUS
*/
-#define IOM_PORT_STATUS_OFFSET 0x560
-
#define IOM_PORT_STATUS_ACTIVITY_TYPE_MASK GENMASK(9, 6)
#define IOM_PORT_STATUS_ACTIVITY_TYPE_SHIFT 6
#define IOM_PORT_STATUS_ACTIVITY_TYPE_USB 0x03
struct pmc_usb_port *port;
struct acpi_device *iom_adev;
void __iomem *iom_base;
+ u32 iom_port_status_offset;
};
static void update_port_status(struct pmc_usb_port *port)
/* SoC expects the USB Type-C port numbers to start with 0 */
port_num = port->usb3_port - 1;
- port->iom_status = readl(port->pmc->iom_base + IOM_PORT_STATUS_OFFSET +
+ port->iom_status = readl(port->pmc->iom_base +
+ port->pmc->iom_port_status_offset +
port_num * sizeof(u32));
}
return !acpi_dev_resource_memory(res, &r);
}
+/* IOM ACPI IDs and IOM_PORT_STATUS_OFFSET */
+static const struct acpi_device_id iom_acpi_ids[] = {
+ /* TigerLake */
+ { "INTC1072", 0x560, },
+
+ /* AlderLake */
+ { "INTC1079", 0x160, },
+ {}
+};
+
static int pmc_usb_probe_iom(struct pmc_usb *pmc)
{
struct list_head resource_list;
struct resource_entry *rentry;
- struct acpi_device *adev;
+ static const struct acpi_device_id *dev_id;
+ struct acpi_device *adev = NULL;
int ret;
- adev = acpi_dev_get_first_match_dev("INTC1072", NULL, -1);
+ for (dev_id = &iom_acpi_ids[0]; dev_id->id[0]; dev_id++) {
+ if (acpi_dev_present(dev_id->id, NULL, -1)) {
+ pmc->iom_port_status_offset = (u32)dev_id->driver_data;
+ adev = acpi_dev_get_first_match_dev(dev_id->id, NULL, -1);
+ break;
+ }
+ }
+
if (!adev)
return -ENODEV;
acpi_dev_free_resource_list(&resource_list);
if (!pmc->iom_base) {
- put_device(&adev->dev);
+ acpi_dev_put(adev);
return -ENOMEM;
}
+ if (IS_ERR(pmc->iom_base)) {
+ acpi_dev_put(adev);
+ return PTR_ERR(pmc->iom_base);
+ }
+
pmc->iom_adev = adev;
return 0;
break;
ret = pmc_usb_register_port(pmc, i, fwnode);
- if (ret)
+ if (ret) {
+ fwnode_handle_put(fwnode);
goto err_remove_ports;
+ }
}
platform_set_drvdata(pdev, pmc);
usb_role_switch_unregister(pmc->port[i].usb_sw);
}
- put_device(&pmc->iom_adev->dev);
+ acpi_dev_put(pmc->iom_adev);
return ret;
}
usb_role_switch_unregister(pmc->port[i].usb_sw);
}
- put_device(&pmc->iom_adev->dev);
+ acpi_dev_put(pmc->iom_adev);
return 0;
}
unsigned int nr_src_pdo;
u32 snk_pdo[PDO_MAX_OBJECTS];
unsigned int nr_snk_pdo;
+ u32 snk_vdo_v1[VDO_MAX_OBJECTS];
+ unsigned int nr_snk_vdo_v1;
u32 snk_vdo[VDO_MAX_OBJECTS];
unsigned int nr_snk_vdo;
port->tcpc->set_cc(port->tcpc, cc);
}
+static int tcpm_enable_auto_vbus_discharge(struct tcpm_port *port, bool enable)
+{
+ int ret = 0;
+
+ if (port->tcpc->enable_auto_vbus_discharge) {
+ ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, enable);
+ tcpm_log_force(port, "%s vbus discharge ret:%d", enable ? "enable" : "disable",
+ ret);
+ if (!ret)
+ port->auto_vbus_discharge_enabled = enable;
+ }
+
+ return ret;
+}
+
+static void tcpm_apply_rc(struct tcpm_port *port)
+{
+ /*
+ * TCPCI: Move to APPLY_RC state to prevent disconnect during PR_SWAP
+ * when Vbus auto discharge on disconnect is enabled.
+ */
+ if (port->tcpc->enable_auto_vbus_discharge && port->tcpc->apply_rc) {
+ tcpm_log(port, "Apply_RC");
+ port->tcpc->apply_rc(port->tcpc, port->cc_req, port->polarity);
+ tcpm_enable_auto_vbus_discharge(port, false);
+ }
+}
+
/*
* Determine RP value to set based on maximum current supported
* by a port if configured as source.
if (PD_VDO_VID(p[0]) != USB_SID_PD)
break;
- if (PD_VDO_SVDM_VER(p[0]) < svdm_version)
+ if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
typec_partner_set_svdm_version(port->partner,
PD_VDO_SVDM_VER(p[0]));
+ svdm_version = PD_VDO_SVDM_VER(p[0]);
+ }
- tcpm_ams_start(port, DISCOVER_IDENTITY);
- /* 6.4.4.3.1: Only respond as UFP (device) */
- if (port->data_role == TYPEC_DEVICE &&
+ port->ams = DISCOVER_IDENTITY;
+ /*
+ * PD2.0 Spec 6.10.3: respond with NAK as DFP (data host)
+ * PD3.1 Spec 6.4.4.2.5.1: respond with NAK if "invalid field" or
+ * "wrong configuation" or "Unrecognized"
+ */
+ if ((port->data_role == TYPEC_DEVICE || svdm_version >= SVDM_VER_2_0) &&
port->nr_snk_vdo) {
- /*
- * Product Type DFP and Connector Type are not defined in SVDM
- * version 1.0 and shall be set to zero.
- */
- if (typec_get_negotiated_svdm_version(typec) < SVDM_VER_2_0)
- response[1] = port->snk_vdo[0] & ~IDH_DFP_MASK
- & ~IDH_CONN_MASK;
- else
- response[1] = port->snk_vdo[0];
- for (i = 1; i < port->nr_snk_vdo; i++)
- response[i + 1] = port->snk_vdo[i];
- rlen = port->nr_snk_vdo + 1;
+ if (svdm_version < SVDM_VER_2_0) {
+ for (i = 0; i < port->nr_snk_vdo_v1; i++)
+ response[i + 1] = port->snk_vdo_v1[i];
+ rlen = port->nr_snk_vdo_v1 + 1;
+
+ } else {
+ for (i = 0; i < port->nr_snk_vdo; i++)
+ response[i + 1] = port->snk_vdo[i];
+ rlen = port->nr_snk_vdo + 1;
+ }
}
break;
case CMD_DISCOVER_SVID:
- tcpm_ams_start(port, DISCOVER_SVIDS);
+ port->ams = DISCOVER_SVIDS;
break;
case CMD_DISCOVER_MODES:
- tcpm_ams_start(port, DISCOVER_MODES);
+ port->ams = DISCOVER_MODES;
break;
case CMD_ENTER_MODE:
- tcpm_ams_start(port, DFP_TO_UFP_ENTER_MODE);
+ port->ams = DFP_TO_UFP_ENTER_MODE;
break;
case CMD_EXIT_MODE:
- tcpm_ams_start(port, DFP_TO_UFP_EXIT_MODE);
+ port->ams = DFP_TO_UFP_EXIT_MODE;
break;
case CMD_ATTENTION:
- tcpm_ams_start(port, ATTENTION);
/* Attention command does not have response */
*adev_action = ADEV_ATTENTION;
return 0;
tcpm_log(port, "VDM Tx error, retry");
port->vdm_retries++;
port->vdm_state = VDM_STATE_READY;
+ if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT)
+ tcpm_ams_finish(port);
+ } else {
tcpm_ams_finish(port);
}
break;
if (!type) {
tcpm_log(port, "Alert message received with no type");
+ tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
return;
}
/* Just handling non-battery alerts for now */
if (!(type & USB_PD_ADO_TYPE_BATT_STATUS_CHANGE)) {
- switch (port->state) {
- case SRC_READY:
- case SNK_READY:
+ if (port->pwr_role == TYPEC_SOURCE) {
+ port->upcoming_state = GET_STATUS_SEND;
+ tcpm_ams_start(port, GETTING_SOURCE_SINK_STATUS);
+ } else {
+ /*
+ * Do not check SinkTxOk here in case the Source doesn't set its Rp to
+ * SinkTxOk in time.
+ */
+ port->ams = GETTING_SOURCE_SINK_STATUS;
tcpm_set_state(port, GET_STATUS_SEND, 0);
- break;
- default:
- tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
- break;
}
+ } else {
+ tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
}
}
tcpm_pd_handle_state(port, BIST_RX, BIST, 0);
break;
case PD_DATA_ALERT:
- tcpm_handle_alert(port, msg->payload, cnt);
+ if (port->state != SRC_READY && port->state != SNK_READY)
+ tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
+ SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
+ NONE_AMS, 0);
+ else
+ tcpm_handle_alert(port, msg->payload, cnt);
break;
case PD_DATA_BATT_STATUS:
case PD_DATA_GET_COUNTRY_INFO:
switch (type) {
case PD_EXT_STATUS:
- /*
- * If PPS related events raised then get PPS status to clear
- * (see USB PD 3.0 Spec, 6.5.2.4)
- */
- if (msg->ext_msg.data[USB_PD_EXT_SDB_EVENT_FLAGS] &
- USB_PD_EXT_SDB_PPS_EVENTS)
- tcpm_pd_handle_state(port, GET_PPS_STATUS_SEND,
- GETTING_SOURCE_SINK_STATUS, 0);
-
- else
- tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0);
- break;
case PD_EXT_PPS_STATUS:
- /*
- * For now the PPS status message is used to clear events
- * and nothing more.
- */
- tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0);
+ if (port->ams == GETTING_SOURCE_SINK_STATUS) {
+ tcpm_ams_finish(port);
+ tcpm_set_state(port, ready_state(port), 0);
+ } else {
+ /* unexpected Status or PPS_Status Message */
+ tcpm_pd_handle_state(port, port->pwr_role == TYPEC_SOURCE ?
+ SRC_SOFT_RESET_WAIT_SNK_TX : SNK_SOFT_RESET,
+ NONE_AMS, 0);
+ }
break;
case PD_EXT_SOURCE_CAP_EXT:
case PD_EXT_GET_BATT_CAP:
if (ret < 0)
return ret;
- if (port->tcpc->enable_auto_vbus_discharge) {
- ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, true);
- tcpm_log_force(port, "enable vbus discharge ret:%d", ret);
- if (!ret)
- port->auto_vbus_discharge_enabled = true;
- }
+ tcpm_enable_auto_vbus_discharge(port, true);
ret = tcpm_set_roles(port, true, TYPEC_SOURCE, tcpm_data_role_for_source(port));
if (ret < 0)
static void tcpm_reset_port(struct tcpm_port *port)
{
- int ret;
-
- if (port->tcpc->enable_auto_vbus_discharge) {
- ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, false);
- tcpm_log_force(port, "Disable vbus discharge ret:%d", ret);
- if (!ret)
- port->auto_vbus_discharge_enabled = false;
- }
+ tcpm_enable_auto_vbus_discharge(port, false);
port->in_ams = false;
port->ams = NONE_AMS;
port->vdm_sm_running = false;
if (ret < 0)
return ret;
- if (port->tcpc->enable_auto_vbus_discharge) {
- tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
- ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, true);
- tcpm_log_force(port, "enable vbus discharge ret:%d", ret);
- if (!ret)
- port->auto_vbus_discharge_enabled = true;
- }
+ tcpm_enable_auto_vbus_discharge(port, true);
ret = tcpm_set_roles(port, true, TYPEC_SINK, tcpm_data_role_for_sink(port));
if (ret < 0)
tcpm_set_state(port, ready_state(port), 0);
break;
case PR_SWAP_START:
+ tcpm_apply_rc(port);
if (port->pwr_role == TYPEC_SOURCE)
tcpm_set_state(port, PR_SWAP_SRC_SNK_TRANSITION_OFF,
PD_T_SRC_TRANSITION);
tcpm_set_state(port, ERROR_RECOVERY, PD_T_PS_SOURCE_ON_PRS);
break;
case PR_SWAP_SRC_SNK_SINK_ON:
+ tcpm_enable_auto_vbus_discharge(port, true);
/* Set the vbus disconnect threshold for implicit contract */
tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
tcpm_set_state(port, SNK_STARTUP, 0);
PD_T_PS_SOURCE_OFF);
break;
case PR_SWAP_SNK_SRC_SOURCE_ON:
+ tcpm_enable_auto_vbus_discharge(port, true);
tcpm_set_cc(port, tcpm_rp_cc(port));
tcpm_set_vbus(port, true);
/*
tcpm_set_state(port, SNK_UNATTACHED, 0);
}
break;
+ case PR_SWAP_SNK_SRC_SINK_OFF:
+ /* Do nothing, vsafe0v is expected during transition */
+ break;
default:
if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled)
tcpm_set_state(port, SNK_UNATTACHED, 0);
return ret;
}
+ /* If sink-vdos is found, sink-vdos-v1 is expected for backward compatibility. */
+ if (port->nr_snk_vdo) {
+ ret = fwnode_property_count_u32(fwnode, "sink-vdos-v1");
+ if (ret < 0)
+ return ret;
+ else if (ret == 0)
+ return -ENODATA;
+
+ port->nr_snk_vdo_v1 = min(ret, VDO_MAX_OBJECTS);
+ ret = fwnode_property_read_u32_array(fwnode, "sink-vdos-v1",
+ port->snk_vdo_v1,
+ port->nr_snk_vdo_v1);
+ if (ret < 0)
+ return ret;
+ }
+
return 0;
}
{
int i;
+ hrtimer_cancel(&port->send_discover_timer);
+ hrtimer_cancel(&port->enable_frs_timer);
+ hrtimer_cancel(&port->vdm_state_machine_timer);
+ hrtimer_cancel(&port->state_machine_timer);
+
tcpm_reset_port(port);
for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
typec_unregister_altmode(port->port_altmode[i]);
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* typec_wcove.c - WhiskeyCove PMIC USB Type-C PHY driver
*
* Copyright (C) 2017 Intel Corporation
const u8 *data = (void *)msg;
int i;
- for (i = 0; i < pd_header_cnt(msg->header) * 4 + 2; i++) {
+ for (i = 0; i < pd_header_cnt_le(msg->header) * 4 + 2; i++) {
ret = regmap_write(wcove->regmap, USBC_TX_DATA + i,
data[i]);
if (ret)
goto err_reset;
}
- /* Allocate the connectors. Released in ucsi_unregister_ppm() */
+ /* Allocate the connectors. Released in ucsi_unregister() */
ucsi->connector = kcalloc(ucsi->cap.num_connectors + 1,
sizeof(*ucsi->connector), GFP_KERNEL);
if (!ucsi->connector) {
}
err_reset:
+ memset(&ucsi->cap, 0, sizeof(ucsi->cap));
ucsi_reset_ppm(ucsi);
err:
return ret;
EXPORT_SYMBOL_GPL(ucsi_get_drvdata);
/**
- * ucsi_get_drvdata - Assign private driver data pointer
+ * ucsi_set_drvdata - Assign private driver data pointer
* @ucsi: UCSI interface
* @data: Private data pointer
*/