// SPDX-License-Identifier: GPL-2.0+
/*
 * Mellanox hotplug driver
 *
 * Copyright (C) 2016-2020 Mellanox Technologies
 */

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_data/mlxreg.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/string_helpers.h>
#include <linux/regmap.h>
#include <linux/workqueue.h>

/* Offset of event and mask registers from status register. */
#define MLXREG_HOTPLUG_EVENT_OFF	1
#define MLXREG_HOTPLUG_MASK_OFF		2
#define MLXREG_HOTPLUG_AGGR_MASK_OFF	1

/* ASIC good health mask. */
#define MLXREG_HOTPLUG_GOOD_HEALTH_MASK	0x02

#define MLXREG_HOTPLUG_ATTRS_MAX	128
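/*
 * Number of consecutive work handler invocations with no assertion detected
 * at the top aggregation level, after which an extra run over all relevant
 * signals is forced in order to recover possibly missed events.
 */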
#define MLXREG_HOTPLUG_NOT_ASSERT	3

/**
 * struct mlxreg_hotplug_priv_data - platform private data:
 * @irq: platform device interrupt number;
 * @dev: basic device;
 * @pdev: platform device;
 * @plat: platform data;
 * @regmap: register map handle;
 * @dwork_irq: delayed work template;
 * @lock: spin lock;
 * @hwmon: hwmon device;
 * @mlxreg_hotplug_attr: sysfs attributes array;
 * @mlxreg_hotplug_dev_attr: sysfs sensor device attribute array;
 * @group: sysfs attribute group;
 * @groups: list of sysfs attribute group for hwmon registration;
 * @cell: location of top aggregation interrupt register;
 * @mask: top aggregation interrupt common mask;
 * @aggr_cache: last value of aggregation register status;
 * @after_probe: flag indicating probing completion;
 * @not_asserted: number of entries in workqueue with no signal assertion;
 */
struct mlxreg_hotplug_priv_data {
	int irq;
	struct device *dev;
	struct platform_device *pdev;
	struct mlxreg_hotplug_platform_data *plat;
	struct regmap *regmap;
	struct delayed_work dwork_irq;
	spinlock_t lock; /* sync with interrupt */
	struct device *hwmon;
	struct attribute *mlxreg_hotplug_attr[MLXREG_HOTPLUG_ATTRS_MAX + 1];
	struct sensor_device_attribute_2
		mlxreg_hotplug_dev_attr[MLXREG_HOTPLUG_ATTRS_MAX];
	struct attribute_group group;
	const struct attribute_group *groups[2];
	u32 cell;
	u32 mask;
	u32 aggr_cache;
	bool after_probe;
	u8 not_asserted;
};

/* Environment variables array for udev. */
static char *mlxreg_hotplug_udev_envp[] = { NULL, NULL };

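/*
 * Send a "<LABEL>=1|0" KOBJ_CHANGE uevent to user space in order to notify
 * about insertion ("1") or removal ("0") of the component described by @data.
 */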
static int
mlxreg_hotplug_udev_event_send(struct kobject *kobj,
			       struct mlxreg_core_data *data, bool action)
{
	char event_str[MLXREG_CORE_LABEL_MAX_SIZE + 2];
	char label[MLXREG_CORE_LABEL_MAX_SIZE] = { 0 };

	mlxreg_hotplug_udev_envp[0] = event_str;
	string_upper(label, data->label);
	snprintf(event_str, MLXREG_CORE_LABEL_MAX_SIZE, "%s=%d", label, !!action);

	return kobject_uevent_env(kobj, KOBJ_CHANGE, mlxreg_hotplug_udev_envp);
}

static void
mlxreg_hotplug_pdata_export(void *pdata, void *regmap)
{
	struct mlxreg_core_hotplug_platform_data *dev_pdata = pdata;

	/* Export regmap to underlying device. */
	dev_pdata->regmap = regmap;
}

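/*
 * Create the device associated with a hotplug event: an I2C client for the
 * default action or a platform device for the platform action. Notify user
 * space through a hwmon uevent and invoke the user notifier, if registered.
 */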
static int mlxreg_hotplug_device_create(struct mlxreg_hotplug_priv_data *priv,
					struct mlxreg_core_data *data,
					enum mlxreg_hotplug_kind kind)
{
	struct i2c_board_info *brdinfo = data->hpdev.brdinfo;
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct i2c_client *client;

	/* Notify user by sending hwmon uevent. */
	mlxreg_hotplug_udev_event_send(&priv->hwmon->kobj, data, true);

	/*
	 * Return if adapter number is negative. It could be the case if the
	 * hotplug event is not associated with a hotplug device.
	 */
	if (data->hpdev.nr < 0)
		return 0;

	pdata = dev_get_platdata(&priv->pdev->dev);
	switch (data->hpdev.action) {
	case MLXREG_HOTPLUG_DEVICE_DEFAULT_ACTION:
		data->hpdev.adapter = i2c_get_adapter(data->hpdev.nr +
						      pdata->shift_nr);
		if (!data->hpdev.adapter) {
			dev_err(priv->dev, "Failed to get adapter for bus %d\n",
				data->hpdev.nr + pdata->shift_nr);
			return -EFAULT;
		}

		/* Export platform data to underlying device. */
		if (brdinfo->platform_data)
			mlxreg_hotplug_pdata_export(brdinfo->platform_data, pdata->regmap);

		client = i2c_new_client_device(data->hpdev.adapter,
					       brdinfo);
		if (IS_ERR(client)) {
			dev_err(priv->dev, "Failed to create client %s at bus %d at addr 0x%02x\n",
				brdinfo->type, data->hpdev.nr +
				pdata->shift_nr, brdinfo->addr);

			i2c_put_adapter(data->hpdev.adapter);
			data->hpdev.adapter = NULL;
			return PTR_ERR(client);
		}

		data->hpdev.client = client;
		break;
	case MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION:
		/* Export platform data to underlying device. */
		if (data->hpdev.brdinfo && data->hpdev.brdinfo->platform_data)
			mlxreg_hotplug_pdata_export(data->hpdev.brdinfo->platform_data,
						    pdata->regmap);
		/* Pass parent hotplug device handle to underlying device. */
		data->notifier = data->hpdev.notifier;
		data->hpdev.pdev = platform_device_register_resndata(&priv->pdev->dev,
								     brdinfo->type,
								     data->hpdev.nr,
								     NULL, 0, data,
								     sizeof(*data));
		if (IS_ERR(data->hpdev.pdev))
			return PTR_ERR(data->hpdev.pdev);

		break;
	default:
		break;
	}

	if (data->hpdev.notifier && data->hpdev.notifier->user_handler)
		return data->hpdev.notifier->user_handler(data->hpdev.notifier->handle, kind, 1);

	return 0;
}

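/*
 * Destroy the device associated with a hotplug event, notify user space
 * through a hwmon uevent and invoke the user notifier, if registered.
 */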
static void
mlxreg_hotplug_device_destroy(struct mlxreg_hotplug_priv_data *priv,
			      struct mlxreg_core_data *data,
			      enum mlxreg_hotplug_kind kind)
{
	/* Notify user by sending hwmon uevent. */
	mlxreg_hotplug_udev_event_send(&priv->hwmon->kobj, data, false);
	if (data->hpdev.notifier && data->hpdev.notifier->user_handler)
		data->hpdev.notifier->user_handler(data->hpdev.notifier->handle, kind, 0);

	switch (data->hpdev.action) {
	case MLXREG_HOTPLUG_DEVICE_DEFAULT_ACTION:
		if (data->hpdev.client) {
			i2c_unregister_device(data->hpdev.client);
			data->hpdev.client = NULL;
		}

		if (data->hpdev.adapter) {
			i2c_put_adapter(data->hpdev.adapter);
			data->hpdev.adapter = NULL;
		}
		break;
	case MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION:
		if (data->hpdev.pdev)
			platform_device_unregister(data->hpdev.pdev);

		break;
	default:
		break;
	}
}

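/* Show callback for the dynamically created hotplug sysfs attributes. */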
static ssize_t mlxreg_hotplug_attr_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct mlxreg_hotplug_priv_data *priv = dev_get_drvdata(dev);
	struct mlxreg_core_hotplug_platform_data *pdata;
	int index = to_sensor_dev_attr_2(attr)->index;
	int nr = to_sensor_dev_attr_2(attr)->nr;
	struct mlxreg_core_item *item;
	struct mlxreg_core_data *data;
	u32 regval;
	int ret;

	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items + nr;
	data = item->data + index;

	ret = regmap_read(priv->regmap, data->reg, &regval);
	if (ret)
		return ret;

	if (item->health) {
		regval &= data->mask;
	} else {
		/* Bit = 0 : functional if item->inversed is true. */
		if (item->inversed)
			regval = !(regval & data->mask);
		else
			regval = !!(regval & data->mask);
	}

	return sprintf(buf, "%u\n", regval);
}

#define PRIV_ATTR(i) priv->mlxreg_hotplug_attr[i]
#define PRIV_DEV_ATTR(i) priv->mlxreg_hotplug_dev_attr[i]

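/*
 * Allocate and initialize sysfs attributes for all interrupt capable
 * components, according to the item and component capability registers.
 */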
static int mlxreg_hotplug_attr_init(struct mlxreg_hotplug_priv_data *priv)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_core_item *item;
	struct mlxreg_core_data *data;
	unsigned long mask;
	u32 regval;
	int num_attrs = 0, id = 0, i, j, k, ret;

	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items;

	/* Go over all kinds of items - psu, pwr, fan. */
	for (i = 0; i < pdata->counter; i++, item++) {
		if (item->capability) {
			/*
			 * Read group capability register to get actual number
			 * of interrupt capable components and set group mask
			 * accordingly.
			 */
			ret = regmap_read(priv->regmap, item->capability,
					  &regval);
			if (ret)
				return ret;
			item->mask = GENMASK((regval & item->mask) - 1, 0);
		}

		data = item->data;

		/* Go over all unmasked units within item. */
		mask = item->mask;
		k = 0;
		for_each_set_bit(j, &mask, item->count) {
			if (data->capability) {
				/*
				 * Read capability register and skip non
				 * relevant attributes.
				 */
				ret = regmap_read(priv->regmap,
						  data->capability, &regval);
				if (ret)
					return ret;
				if (!(regval & data->bit)) {
					k++;
					goto skip_attr;
				}
			}
			PRIV_ATTR(id) = &PRIV_DEV_ATTR(id).dev_attr.attr;
			PRIV_ATTR(id)->name = devm_kasprintf(&priv->pdev->dev,
							     GFP_KERNEL,
							     data->label);
			if (!PRIV_ATTR(id)->name) {
				dev_err(priv->dev, "Memory allocation failed for attr %d.\n",
					id);
				return -ENOMEM;
			}
			PRIV_DEV_ATTR(id).dev_attr.attr.name =
							PRIV_ATTR(id)->name;
			PRIV_DEV_ATTR(id).dev_attr.attr.mode = 0444;
			PRIV_DEV_ATTR(id).dev_attr.show =
						mlxreg_hotplug_attr_show;
			PRIV_DEV_ATTR(id).nr = i;
			PRIV_DEV_ATTR(id).index = k;
			sysfs_attr_init(&PRIV_DEV_ATTR(id).dev_attr.attr);
			id++;
			k++;
skip_attr:
			data++;
			num_attrs++;
		}
	}

	priv->group.attrs = devm_kcalloc(&priv->pdev->dev, num_attrs,
					 sizeof(struct attribute *),
					 GFP_KERNEL);
	if (!priv->group.attrs)
		return -ENOMEM;

	priv->group.attrs = priv->mlxreg_hotplug_attr;
	priv->groups[0] = &priv->group;
	priv->groups[1] = NULL;

	return 0;
}

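/*
 * Handle signals for a single item: mask the group event, read the status
 * register, compare it with the cached value and create or destroy devices
 * for the toggled bits, then acknowledge and unmask the event.
 */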
static void
mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data *priv,
			   struct mlxreg_core_item *item)
{
	struct mlxreg_core_data *data;
	unsigned long asserted;
	u32 regval, bit;
	int ret;

	/*
	 * Validate if the item related to the received signal type is valid.
	 * It should never happen, except in the situation when some piece of
	 * hardware is broken. In such a situation just produce an error
	 * message and return. The caller must continue to handle the signals
	 * from other devices, if any.
	 */
	if (unlikely(!item)) {
		dev_err(priv->dev, "False signal is received.\n");
		return;
	}

	/* Mask event. */
	ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_MASK_OFF,
			   0);
	if (ret)
		goto out;

	/* Read status. */
	ret = regmap_read(priv->regmap, item->reg, &regval);
	if (ret)
		goto out;

	/* Set asserted bits and save last status. */
	regval &= item->mask;
	asserted = item->cache ^ regval;
	item->cache = regval;
	for_each_set_bit(bit, &asserted, 8) {
		data = item->data + bit;
		if (regval & BIT(bit)) {
			if (item->inversed)
				mlxreg_hotplug_device_destroy(priv, data, item->kind);
			else
				mlxreg_hotplug_device_create(priv, data, item->kind);
		} else {
			if (item->inversed)
				mlxreg_hotplug_device_create(priv, data, item->kind);
			else
				mlxreg_hotplug_device_destroy(priv, data, item->kind);
		}
	}

	/* Acknowledge event. */
	ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_EVENT_OFF,
			   0);
	if (ret)
		goto out;

	/* Unmask event. */
	ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_MASK_OFF,
			   item->mask);
out:
	if (ret)
		dev_err(priv->dev, "Failed to complete workqueue.\n");
}

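/*
 * Handle ASIC health signals: create the associated device when the ASIC
 * reaches the good health state and destroy it when health is lost.
 */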
static void
mlxreg_hotplug_health_work_helper(struct mlxreg_hotplug_priv_data *priv,
				  struct mlxreg_core_item *item)
{
	struct mlxreg_core_data *data = item->data;
	u32 regval;
	int i, ret = 0;

	for (i = 0; i < item->count; i++, data++) {
		/* Mask event. */
		ret = regmap_write(priv->regmap, data->reg +
				   MLXREG_HOTPLUG_MASK_OFF, 0);
		if (ret)
			goto out;

		/* Read status. */
		ret = regmap_read(priv->regmap, data->reg, &regval);
		if (ret)
			goto out;

		regval &= data->mask;
		if (item->cache == regval)
			goto ack_event;

		/*
		 * ASIC health indication is provided through two bits. The
		 * value 0x2 indicates that the ASIC reached the good health
		 * state, 0x0 indicates the bad health or dormant state and
		 * 0x3 indicates the booting state. During ASIC reset it
		 * should pass the following states: dormant -> booting -> good.
		 */
		if (regval == MLXREG_HOTPLUG_GOOD_HEALTH_MASK) {
			if (!data->attached) {
				/*
				 * ASIC is in steady state. Connect associated
				 * device, if configured.
				 */
				mlxreg_hotplug_device_create(priv, data, item->kind);
				data->attached = true;
			}
		} else {
			if (data->attached) {
				/*
				 * ASIC health is failed after ASIC has been
				 * in steady state. Disconnect associated
				 * device, if it has been connected.
				 */
				mlxreg_hotplug_device_destroy(priv, data, item->kind);
				data->attached = false;
				data->health_cntr = 0;
			}
		}
		item->cache = regval;
ack_event:
		/* Acknowledge event. */
		ret = regmap_write(priv->regmap, data->reg +
				   MLXREG_HOTPLUG_EVENT_OFF, 0);
		if (ret)
			goto out;

		/* Unmask event. */
		ret = regmap_write(priv->regmap, data->reg +
				   MLXREG_HOTPLUG_MASK_OFF, data->mask);
		if (ret)
			goto out;
	}

out:
	if (ret)
		dev_err(priv->dev, "Failed to complete workqueue.\n");
}

/*
 * mlxreg_hotplug_work_handler - performs traversing of device interrupt
 * registers according to the below hierarchy schema:
 *
 *				Aggregation registers (status/mask)
 * PSU registers:		*---*
 * *-----------------*		|   |
 * |status/event/mask|----->	| * |
 * *-----------------*		|   |
 * Power registers:		|   |
 * *-----------------*		|   |
 * |status/event/mask|----->	| * |
 * *-----------------*		|   |
 * FAN registers:		|   |--> CPU
 * *-----------------*		|   |
 * |status/event/mask|----->	| * |
 * *-----------------*		|   |
 * ASIC registers:		|   |
 * *-----------------*		|   |
 * |status/event/mask|----->	| * |
 * *-----------------*		|   |
 *				*---*
 *
 * In case some system changes are detected: FAN in/out, PSU in/out, power
 * cable attached/detached, ASIC health good/bad, the relevant device is
 * created or destroyed.
 */
static void mlxreg_hotplug_work_handler(struct work_struct *work)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_hotplug_priv_data *priv;
	struct mlxreg_core_item *item;
	u32 regval, aggr_asserted;
	unsigned long flags;
	int i, ret;

	priv = container_of(work, struct mlxreg_hotplug_priv_data,
			    dwork_irq.work);
	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items;

	/* Mask aggregation event. */
	ret = regmap_write(priv->regmap, pdata->cell +
			   MLXREG_HOTPLUG_AGGR_MASK_OFF, 0);
	if (ret)
		goto out;

	/* Read aggregation status. */
	ret = regmap_read(priv->regmap, pdata->cell, &regval);
	if (ret)
		goto out;

	regval &= pdata->mask;
	aggr_asserted = priv->aggr_cache ^ regval;
	priv->aggr_cache = regval;

	/*
	 * Handler is invoked, but no assertion is detected at the top
	 * aggregation status level. Set aggr_asserted to the mask value to
	 * allow the handler an extra run over all relevant signals to recover
	 * any missed signal.
	 */
	if (priv->not_asserted == MLXREG_HOTPLUG_NOT_ASSERT) {
		priv->not_asserted = 0;
		aggr_asserted = pdata->mask;
	}
	if (!aggr_asserted)
		goto unmask_event;

	/* Handle topology and health configuration changes. */
	for (i = 0; i < pdata->counter; i++, item++) {
		if (aggr_asserted & item->aggr_mask) {
			if (item->health)
				mlxreg_hotplug_health_work_helper(priv, item);
			else
				mlxreg_hotplug_work_helper(priv, item);
		}
	}

	spin_lock_irqsave(&priv->lock, flags);

	/*
	 * It is possible that some signals have been inserted while the
	 * interrupt has been masked by mlxreg_hotplug_work_handler. In this
	 * case such signals will be missed. In order to handle these signals
	 * the delayed work is canceled and the work task is re-scheduled for
	 * immediate execution. It allows handling the missed signals, if any.
	 * Otherwise the work handler just validates that no new signals have
	 * been received during masking.
	 */
	cancel_delayed_work(&priv->dwork_irq);
	schedule_delayed_work(&priv->dwork_irq, 0);

	spin_unlock_irqrestore(&priv->lock, flags);

	return;

unmask_event:
	priv->not_asserted++;
	/* Unmask aggregation event (no need to acknowledge). */
	ret = regmap_write(priv->regmap, pdata->cell +
			   MLXREG_HOTPLUG_AGGR_MASK_OFF, pdata->mask);
out:
	if (ret)
		dev_err(priv->dev, "Failed to complete workqueue.\n");
}

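/*
 * Perform initial interrupt setup: clear stale events, adjust group masks
 * according to the capability registers, unmask group and aggregation events
 * and run the work handler once for initial device setup.
 */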
static int mlxreg_hotplug_set_irq(struct mlxreg_hotplug_priv_data *priv)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_core_item *item;
	struct mlxreg_core_data *data;
	u32 regval;
	int i, j, ret;

	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items;

	for (i = 0; i < pdata->counter; i++, item++) {
		/* Clear group presence event. */
		ret = regmap_write(priv->regmap, item->reg +
				   MLXREG_HOTPLUG_EVENT_OFF, 0);
		if (ret)
			goto out;

		/*
		 * Verify if the hardware configuration requires disabling the
		 * interrupt capability for some of the components.
		 */
		data = item->data;
		for (j = 0; j < item->count; j++, data++) {
			/* Verify if the attribute has capability register. */
			if (data->capability) {
				/* Read capability register. */
				ret = regmap_read(priv->regmap,
						  data->capability, &regval);
				if (ret)
					goto out;

				if (!(regval & data->bit))
					item->mask &= ~BIT(j);
			}
		}

		/* Set group initial status as mask and unmask group event. */
		if (item->inversed) {
			item->cache = item->mask;
			ret = regmap_write(priv->regmap, item->reg +
					   MLXREG_HOTPLUG_MASK_OFF,
					   item->mask);
			if (ret)
				goto out;
		}
	}

	/* Keep aggregation initial status as zero and unmask events. */
	ret = regmap_write(priv->regmap, pdata->cell +
			   MLXREG_HOTPLUG_AGGR_MASK_OFF, pdata->mask);
	if (ret)
		goto out;

	/* Keep low aggregation initial status as zero and unmask events. */
	if (pdata->cell_low) {
		ret = regmap_write(priv->regmap, pdata->cell_low +
				   MLXREG_HOTPLUG_AGGR_MASK_OFF,
				   pdata->mask_low);
		if (ret)
			goto out;
	}

	/* Invoke the work handler to initialize the hotplug devices setting. */
	mlxreg_hotplug_work_handler(&priv->dwork_irq.work);

out:
	if (ret)
		dev_err(priv->dev, "Failed to set interrupts.\n");
	enable_irq(priv->irq);
	return ret;
}

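/*
 * Teardown counterpart of mlxreg_hotplug_set_irq(): mask and clear all
 * events and remove the devices attached to each group.
 */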
static void mlxreg_hotplug_unset_irq(struct mlxreg_hotplug_priv_data *priv)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_core_item *item;
	struct mlxreg_core_data *data;
	int count, i, j;

	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items;
	disable_irq(priv->irq);
	cancel_delayed_work_sync(&priv->dwork_irq);

	/* Mask low aggregation event, if defined. */
	if (pdata->cell_low)
		regmap_write(priv->regmap, pdata->cell_low +
			     MLXREG_HOTPLUG_AGGR_MASK_OFF, 0);

	/* Mask aggregation event. */
	regmap_write(priv->regmap, pdata->cell + MLXREG_HOTPLUG_AGGR_MASK_OFF,
		     0);

	/* Clear topology configurations. */
	for (i = 0; i < pdata->counter; i++, item++) {
		data = item->data;
		/* Mask group presence event. */
		regmap_write(priv->regmap, data->reg + MLXREG_HOTPLUG_MASK_OFF,
			     0);
		/* Clear group presence event. */
		regmap_write(priv->regmap, data->reg +
			     MLXREG_HOTPLUG_EVENT_OFF, 0);

		/* Remove all the attached devices in the group. */
		count = item->count;
		for (j = 0; j < count; j++, data++)
			mlxreg_hotplug_device_destroy(priv, data, item->kind);
	}
}

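/*
 * Top level interrupt handler. All register handling is deferred to the
 * delayed work, which runs in process context.
 */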
static irqreturn_t mlxreg_hotplug_irq_handler(int irq, void *dev)
{
	struct mlxreg_hotplug_priv_data *priv;

	priv = (struct mlxreg_hotplug_priv_data *)dev;

	/* Schedule work task for immediate execution. */
	schedule_delayed_work(&priv->dwork_irq, 0);

	return IRQ_HANDLED;
}

static int mlxreg_hotplug_probe(struct platform_device *pdev)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_hotplug_priv_data *priv;
	struct i2c_adapter *deferred_adap;
	int err;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "Failed to get platform data.\n");
		return -EINVAL;
	}

	/* Defer probing if the necessary adapter is not configured yet. */
	deferred_adap = i2c_get_adapter(pdata->deferred_nr);
	if (!deferred_adap)
		return -EPROBE_DEFER;
	i2c_put_adapter(deferred_adap);

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (pdata->irq) {
		priv->irq = pdata->irq;
	} else {
		priv->irq = platform_get_irq(pdev, 0);
		if (priv->irq < 0)
			return priv->irq;
	}

	priv->regmap = pdata->regmap;
	priv->dev = pdev->dev.parent;
	priv->pdev = pdev;

	err = devm_request_irq(&pdev->dev, priv->irq,
			       mlxreg_hotplug_irq_handler, IRQF_TRIGGER_FALLING
			       | IRQF_SHARED, "mlxreg-hotplug", priv);
	if (err) {
		dev_err(&pdev->dev, "Failed to request irq: %d\n", err);
		return err;
	}

	disable_irq(priv->irq);
	spin_lock_init(&priv->lock);
	INIT_DELAYED_WORK(&priv->dwork_irq, mlxreg_hotplug_work_handler);
	dev_set_drvdata(&pdev->dev, priv);

	err = mlxreg_hotplug_attr_init(priv);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate attributes: %d\n",
			err);
		return err;
	}

	priv->hwmon = devm_hwmon_device_register_with_groups(&pdev->dev,
					"mlxreg_hotplug", priv, priv->groups);
	if (IS_ERR(priv->hwmon)) {
		dev_err(&pdev->dev, "Failed to register hwmon device %ld\n",
			PTR_ERR(priv->hwmon));
		return PTR_ERR(priv->hwmon);
	}

	/* Perform initial interrupts setup. */
	mlxreg_hotplug_set_irq(priv);
	priv->after_probe = true;

	return 0;
}

static int mlxreg_hotplug_remove(struct platform_device *pdev)
{
	struct mlxreg_hotplug_priv_data *priv = dev_get_drvdata(&pdev->dev);

	/* Clean interrupts setup. */
	mlxreg_hotplug_unset_irq(priv);
	devm_free_irq(&pdev->dev, priv->irq, priv);

	return 0;
}

static struct platform_driver mlxreg_hotplug_driver = {
	.driver = {
		.name = "mlxreg-hotplug",
	},
	.probe = mlxreg_hotplug_probe,
	.remove = mlxreg_hotplug_remove,
};

module_platform_driver(mlxreg_hotplug_driver);

MODULE_DESCRIPTION("Mellanox regmap hotplug platform driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("platform:mlxreg-hotplug");