// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015-2018, Intel Corporation.
 */
6 #define pr_fmt(fmt) "aspeed-kcs-bmc: " fmt
8 #include <linux/atomic.h>
9 #include <linux/errno.h>
10 #include <linux/interrupt.h>
12 #include <linux/irq.h>
13 #include <linux/mfd/syscon.h>
14 #include <linux/module.h>
16 #include <linux/of_address.h>
17 #include <linux/of_device.h>
18 #include <linux/platform_device.h>
19 #include <linux/poll.h>
20 #include <linux/regmap.h>
21 #include <linux/sched.h>
22 #include <linux/slab.h>
23 #include <linux/timer.h>
25 #include "kcs_bmc_device.h"
28 #define DEVICE_NAME "ast-kcs-bmc"
30 #define KCS_CHANNEL_MAX 4
/*
 * Field class descriptions
 *
 * LPCyE	Enable LPC channel y
 * IBFIEy	Input Buffer Full IRQ Enable for LPC channel y
 * IRQxEy	Assert SerIRQ x for LPC channel y (Deprecated, use IDyIRQX, IRQXEy)
 * IDyIRQX	Use the specified 4-bit SerIRQ for LPC channel y
 * SELyIRQX	SerIRQ polarity for LPC channel y (low: 0, high: 1)
 * IRQXEy	Assert the SerIRQ specified in IDyIRQX for LPC channel y
 */
43 #define LPC_TYIRQX_LOW 0b00
44 #define LPC_TYIRQX_HIGH 0b01
45 #define LPC_TYIRQX_RSVD 0b10
46 #define LPC_TYIRQX_RISING 0b11
48 #define LPC_HICR0 0x000
49 #define LPC_HICR0_LPC3E BIT(7)
50 #define LPC_HICR0_LPC2E BIT(6)
51 #define LPC_HICR0_LPC1E BIT(5)
52 #define LPC_HICR2 0x008
53 #define LPC_HICR2_IBFIE3 BIT(3)
54 #define LPC_HICR2_IBFIE2 BIT(2)
55 #define LPC_HICR2_IBFIE1 BIT(1)
56 #define LPC_HICR4 0x010
57 #define LPC_HICR4_LADR12AS BIT(7)
58 #define LPC_HICR4_KCSENBL BIT(2)
59 #define LPC_SIRQCR0 0x070
60 /* IRQ{12,1}E1 are deprecated as of AST2600 A3 but necessary for prior chips */
61 #define LPC_SIRQCR0_IRQ12E1 BIT(1)
62 #define LPC_SIRQCR0_IRQ1E1 BIT(0)
63 #define LPC_HICR5 0x080
64 #define LPC_HICR5_ID3IRQX_MASK GENMASK(23, 20)
65 #define LPC_HICR5_ID3IRQX_SHIFT 20
66 #define LPC_HICR5_ID2IRQX_MASK GENMASK(19, 16)
67 #define LPC_HICR5_ID2IRQX_SHIFT 16
68 #define LPC_HICR5_SEL3IRQX BIT(15)
69 #define LPC_HICR5_IRQXE3 BIT(14)
70 #define LPC_HICR5_SEL2IRQX BIT(13)
71 #define LPC_HICR5_IRQXE2 BIT(12)
72 #define LPC_LADR3H 0x014
73 #define LPC_LADR3L 0x018
74 #define LPC_LADR12H 0x01C
75 #define LPC_LADR12L 0x020
76 #define LPC_IDR1 0x024
77 #define LPC_IDR2 0x028
78 #define LPC_IDR3 0x02C
79 #define LPC_ODR1 0x030
80 #define LPC_ODR2 0x034
81 #define LPC_ODR3 0x038
82 #define LPC_STR1 0x03C
83 #define LPC_STR2 0x040
84 #define LPC_STR3 0x044
85 #define LPC_HICRB 0x100
86 #define LPC_HICRB_EN16LADR2 BIT(5)
87 #define LPC_HICRB_EN16LADR1 BIT(4)
88 #define LPC_HICRB_IBFIE4 BIT(1)
89 #define LPC_HICRB_LPC4E BIT(0)
90 #define LPC_HICRC 0x104
91 #define LPC_HICRC_ID4IRQX_MASK GENMASK(7, 4)
92 #define LPC_HICRC_ID4IRQX_SHIFT 4
93 #define LPC_HICRC_TY4IRQX_MASK GENMASK(3, 2)
94 #define LPC_HICRC_TY4IRQX_SHIFT 2
95 #define LPC_HICRC_OBF4_AUTO_CLR BIT(1)
96 #define LPC_HICRC_IRQXE4 BIT(0)
97 #define LPC_LADR4 0x110
98 #define LPC_IDR4 0x114
99 #define LPC_ODR4 0x118
100 #define LPC_STR4 0x11C
101 #define LPC_LSADR12 0x120
102 #define LPC_LSADR12_LSADR2_MASK GENMASK(31, 16)
103 #define LPC_LSADR12_LSADR2_SHIFT 16
104 #define LPC_LSADR12_LSADR1_MASK GENMASK(15, 0)
105 #define LPC_LSADR12_LSADR1_SHIFT 0
107 #define OBE_POLL_PERIOD (HZ / 2)
109 enum aspeed_kcs_irq_mode {
111 aspeed_kcs_irq_serirq,
114 struct aspeed_kcs_bmc {
115 struct kcs_bmc_device kcs_bmc;
120 enum aspeed_kcs_irq_mode mode;
127 struct timer_list timer;
131 static inline struct aspeed_kcs_bmc *to_aspeed_kcs_bmc(struct kcs_bmc_device *kcs_bmc)
133 return container_of(kcs_bmc, struct aspeed_kcs_bmc, kcs_bmc);
136 static u8 aspeed_kcs_inb(struct kcs_bmc_device *kcs_bmc, u32 reg)
138 struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
142 rc = regmap_read(priv->map, reg, &val);
143 WARN(rc != 0, "regmap_read() failed: %d\n", rc);
145 return rc == 0 ? (u8) val : 0;
148 static void aspeed_kcs_outb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 data)
150 struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
153 rc = regmap_write(priv->map, reg, data);
154 WARN(rc != 0, "regmap_write() failed: %d\n", rc);
156 /* Trigger the upstream IRQ on ODR writes, if enabled */
168 if (priv->upstream_irq.mode != aspeed_kcs_irq_serirq)
171 switch (kcs_bmc->channel) {
173 switch (priv->upstream_irq.id) {
175 regmap_update_bits(priv->map, LPC_SIRQCR0, LPC_SIRQCR0_IRQ12E1,
176 LPC_SIRQCR0_IRQ12E1);
179 regmap_update_bits(priv->map, LPC_SIRQCR0, LPC_SIRQCR0_IRQ1E1,
187 regmap_update_bits(priv->map, LPC_HICR5, LPC_HICR5_IRQXE2, LPC_HICR5_IRQXE2);
190 regmap_update_bits(priv->map, LPC_HICR5, LPC_HICR5_IRQXE3, LPC_HICR5_IRQXE3);
193 regmap_update_bits(priv->map, LPC_HICRC, LPC_HICRC_IRQXE4, LPC_HICRC_IRQXE4);
200 static void aspeed_kcs_updateb(struct kcs_bmc_device *kcs_bmc, u32 reg, u8 mask, u8 val)
202 struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
205 rc = regmap_update_bits(priv->map, reg, mask, val);
206 WARN(rc != 0, "regmap_update_bits() failed: %d\n", rc);
/*
 * We note D for Data, and C for Cmd/Status, default rules are
 *
 * 1. Only the D address is given:
 *   A. KCS1/KCS2 (D/C: X/X+4)
 *   B. KCS3 (D/C: XX2/XX3h)
 *   C. KCS4 (D/C: X/X+1)
 *
 * 2. Both the D/C addresses are given:
 *   A. KCS1/KCS2/KCS4 (D/C: X/Y)
 *   B. KCS3 (D/C: XX2/XX3h), same as the default rule
 */
229 static int aspeed_kcs_set_address(struct kcs_bmc_device *kcs_bmc, u32 addrs[2], int nr_addrs)
231 struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
233 if (WARN_ON(nr_addrs < 1 || nr_addrs > 2))
236 switch (priv->kcs_bmc.channel) {
238 regmap_update_bits(priv->map, LPC_HICR4, LPC_HICR4_LADR12AS, 0);
239 regmap_write(priv->map, LPC_LADR12H, addrs[0] >> 8);
240 regmap_write(priv->map, LPC_LADR12L, addrs[0] & 0xFF);
242 regmap_update_bits(priv->map, LPC_LSADR12, LPC_LSADR12_LSADR1_MASK,
243 addrs[1] << LPC_LSADR12_LSADR1_SHIFT);
245 regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_EN16LADR1,
246 LPC_HICRB_EN16LADR1);
251 regmap_update_bits(priv->map, LPC_HICR4, LPC_HICR4_LADR12AS, LPC_HICR4_LADR12AS);
252 regmap_write(priv->map, LPC_LADR12H, addrs[0] >> 8);
253 regmap_write(priv->map, LPC_LADR12L, addrs[0] & 0xFF);
255 regmap_update_bits(priv->map, LPC_LSADR12, LPC_LSADR12_LSADR2_MASK,
256 addrs[1] << LPC_LSADR12_LSADR2_SHIFT);
258 regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_EN16LADR2,
259 LPC_HICRB_EN16LADR2);
265 dev_err(priv->kcs_bmc.dev,
266 "Channel 3 only supports inferred status IO address\n");
270 regmap_write(priv->map, LPC_LADR3H, addrs[0] >> 8);
271 regmap_write(priv->map, LPC_LADR3L, addrs[0] & 0xFF);
276 regmap_write(priv->map, LPC_LADR4, ((addrs[0] + 1) << 16) | addrs[0]);
278 regmap_write(priv->map, LPC_LADR4, (addrs[1] << 16) | addrs[0]);
289 static inline int aspeed_kcs_map_serirq_type(u32 dt_type)
292 case IRQ_TYPE_EDGE_RISING:
293 return LPC_TYIRQX_RISING;
294 case IRQ_TYPE_LEVEL_HIGH:
295 return LPC_TYIRQX_HIGH;
296 case IRQ_TYPE_LEVEL_LOW:
297 return LPC_TYIRQX_LOW;
303 static int aspeed_kcs_config_upstream_irq(struct aspeed_kcs_bmc *priv, u32 id, u32 dt_type)
305 unsigned int mask, val, hw_type;
311 ret = aspeed_kcs_map_serirq_type(dt_type);
316 priv->upstream_irq.mode = aspeed_kcs_irq_serirq;
317 priv->upstream_irq.id = id;
319 switch (priv->kcs_bmc.channel) {
321 /* Needs IRQxE1 rather than (ID1IRQX, SEL1IRQX, IRQXE1) before AST2600 A3 */
324 if (!(hw_type == LPC_TYIRQX_LOW || hw_type == LPC_TYIRQX_HIGH))
327 mask = LPC_HICR5_SEL2IRQX | LPC_HICR5_ID2IRQX_MASK;
328 val = (id << LPC_HICR5_ID2IRQX_SHIFT);
329 val |= (hw_type == LPC_TYIRQX_HIGH) ? LPC_HICR5_SEL2IRQX : 0;
330 regmap_update_bits(priv->map, LPC_HICR5, mask, val);
334 if (!(hw_type == LPC_TYIRQX_LOW || hw_type == LPC_TYIRQX_HIGH))
337 mask = LPC_HICR5_SEL3IRQX | LPC_HICR5_ID3IRQX_MASK;
338 val = (id << LPC_HICR5_ID3IRQX_SHIFT);
339 val |= (hw_type == LPC_TYIRQX_HIGH) ? LPC_HICR5_SEL3IRQX : 0;
340 regmap_update_bits(priv->map, LPC_HICR5, mask, val);
344 mask = LPC_HICRC_ID4IRQX_MASK | LPC_HICRC_TY4IRQX_MASK | LPC_HICRC_OBF4_AUTO_CLR;
345 val = (id << LPC_HICRC_ID4IRQX_SHIFT) | (hw_type << LPC_HICRC_TY4IRQX_SHIFT);
346 regmap_update_bits(priv->map, LPC_HICRC, mask, val);
349 dev_warn(priv->kcs_bmc.dev,
350 "SerIRQ configuration not supported on KCS channel %d\n",
351 priv->kcs_bmc.channel);
358 static void aspeed_kcs_enable_channel(struct kcs_bmc_device *kcs_bmc, bool enable)
360 struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
362 switch (kcs_bmc->channel) {
364 regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC1E, enable * LPC_HICR0_LPC1E);
367 regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC2E, enable * LPC_HICR0_LPC2E);
370 regmap_update_bits(priv->map, LPC_HICR0, LPC_HICR0_LPC3E, enable * LPC_HICR0_LPC3E);
371 regmap_update_bits(priv->map, LPC_HICR4,
372 LPC_HICR4_KCSENBL, enable * LPC_HICR4_KCSENBL);
375 regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_LPC4E, enable * LPC_HICRB_LPC4E);
378 pr_warn("%s: Unsupported channel: %d", __func__, kcs_bmc->channel);
383 static void aspeed_kcs_check_obe(struct timer_list *timer)
385 struct aspeed_kcs_bmc *priv = container_of(timer, struct aspeed_kcs_bmc, obe.timer);
389 spin_lock_irqsave(&priv->obe.lock, flags);
390 if (priv->obe.remove) {
391 spin_unlock_irqrestore(&priv->obe.lock, flags);
395 str = aspeed_kcs_inb(&priv->kcs_bmc, priv->kcs_bmc.ioreg.str);
396 if (str & KCS_BMC_STR_OBF) {
397 mod_timer(timer, jiffies + OBE_POLL_PERIOD);
398 spin_unlock_irqrestore(&priv->obe.lock, flags);
401 spin_unlock_irqrestore(&priv->obe.lock, flags);
403 kcs_bmc_handle_event(&priv->kcs_bmc);
406 static void aspeed_kcs_irq_mask_update(struct kcs_bmc_device *kcs_bmc, u8 mask, u8 state)
408 struct aspeed_kcs_bmc *priv = to_aspeed_kcs_bmc(kcs_bmc);
412 /* We don't have an OBE IRQ, emulate it */
413 if (mask & KCS_BMC_EVENT_TYPE_OBE) {
414 if (KCS_BMC_EVENT_TYPE_OBE & state) {
416 * Given we don't have an OBE IRQ, delay by polling briefly to see if we can
417 * observe such an event before returning to the caller. This is not
418 * incorrect because OBF may have already become clear before enabling the
419 * IRQ if we had one, under which circumstance no event will be propagated
422 * The onus is on the client to perform a race-free check that it hasn't
425 rc = read_poll_timeout_atomic(aspeed_kcs_inb, str,
426 !(str & KCS_BMC_STR_OBF), 1, 100, false,
427 &priv->kcs_bmc, priv->kcs_bmc.ioreg.str);
428 /* Time for the slow path? */
429 if (rc == -ETIMEDOUT)
430 mod_timer(&priv->obe.timer, jiffies + OBE_POLL_PERIOD);
432 del_timer(&priv->obe.timer);
436 if (mask & KCS_BMC_EVENT_TYPE_IBF) {
437 const bool enable = !!(state & KCS_BMC_EVENT_TYPE_IBF);
439 switch (kcs_bmc->channel) {
441 regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE1,
442 enable * LPC_HICR2_IBFIE1);
445 regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE2,
446 enable * LPC_HICR2_IBFIE2);
449 regmap_update_bits(priv->map, LPC_HICR2, LPC_HICR2_IBFIE3,
450 enable * LPC_HICR2_IBFIE3);
453 regmap_update_bits(priv->map, LPC_HICRB, LPC_HICRB_IBFIE4,
454 enable * LPC_HICRB_IBFIE4);
457 pr_warn("%s: Unsupported channel: %d", __func__, kcs_bmc->channel);
463 static const struct kcs_bmc_device_ops aspeed_kcs_ops = {
464 .irq_mask_update = aspeed_kcs_irq_mask_update,
465 .io_inputb = aspeed_kcs_inb,
466 .io_outputb = aspeed_kcs_outb,
467 .io_updateb = aspeed_kcs_updateb,
470 static irqreturn_t aspeed_kcs_irq(int irq, void *arg)
472 struct kcs_bmc_device *kcs_bmc = arg;
474 return kcs_bmc_handle_event(kcs_bmc);
477 static int aspeed_kcs_config_downstream_irq(struct kcs_bmc_device *kcs_bmc,
478 struct platform_device *pdev)
480 struct device *dev = &pdev->dev;
483 irq = platform_get_irq(pdev, 0);
487 return devm_request_irq(dev, irq, aspeed_kcs_irq, IRQF_SHARED,
488 dev_name(dev), kcs_bmc);
491 static const struct kcs_ioreg ast_kcs_bmc_ioregs[KCS_CHANNEL_MAX] = {
492 { .idr = LPC_IDR1, .odr = LPC_ODR1, .str = LPC_STR1 },
493 { .idr = LPC_IDR2, .odr = LPC_ODR2, .str = LPC_STR2 },
494 { .idr = LPC_IDR3, .odr = LPC_ODR3, .str = LPC_STR3 },
495 { .idr = LPC_IDR4, .odr = LPC_ODR4, .str = LPC_STR4 },
498 static int aspeed_kcs_of_get_channel(struct platform_device *pdev)
500 struct device_node *np;
501 struct kcs_ioreg ioreg;
505 np = pdev->dev.of_node;
507 /* Don't translate addresses, we want offsets for the regmaps */
508 reg = of_get_address(np, 0, NULL, NULL);
511 ioreg.idr = be32_to_cpup(reg);
513 reg = of_get_address(np, 1, NULL, NULL);
516 ioreg.odr = be32_to_cpup(reg);
518 reg = of_get_address(np, 2, NULL, NULL);
521 ioreg.str = be32_to_cpup(reg);
523 for (i = 0; i < ARRAY_SIZE(ast_kcs_bmc_ioregs); i++) {
524 if (!memcmp(&ast_kcs_bmc_ioregs[i], &ioreg, sizeof(ioreg)))
531 aspeed_kcs_of_get_io_address(struct platform_device *pdev, u32 addrs[2])
535 rc = of_property_read_variable_u32_array(pdev->dev.of_node,
539 dev_err(&pdev->dev, "No valid 'aspeed,lpc-io-reg' configured\n");
543 if (addrs[0] > 0xffff) {
544 dev_err(&pdev->dev, "Invalid data address in 'aspeed,lpc-io-reg'\n");
548 if (rc == 2 && addrs[1] > 0xffff) {
549 dev_err(&pdev->dev, "Invalid status address in 'aspeed,lpc-io-reg'\n");
556 static int aspeed_kcs_probe(struct platform_device *pdev)
558 struct kcs_bmc_device *kcs_bmc;
559 struct aspeed_kcs_bmc *priv;
560 struct device_node *np;
561 bool have_upstream_irq;
567 np = pdev->dev.of_node->parent;
568 if (!of_device_is_compatible(np, "aspeed,ast2400-lpc-v2") &&
569 !of_device_is_compatible(np, "aspeed,ast2500-lpc-v2") &&
570 !of_device_is_compatible(np, "aspeed,ast2600-lpc-v2")) {
571 dev_err(&pdev->dev, "unsupported LPC device binding\n");
575 channel = aspeed_kcs_of_get_channel(pdev);
579 nr_addrs = aspeed_kcs_of_get_io_address(pdev, addrs);
583 np = pdev->dev.of_node;
584 rc = of_property_read_u32_array(np, "aspeed,lpc-interrupts", upstream_irq, 2);
585 if (rc && rc != -EINVAL)
588 have_upstream_irq = !rc;
590 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
594 kcs_bmc = &priv->kcs_bmc;
595 kcs_bmc->dev = &pdev->dev;
596 kcs_bmc->channel = channel;
597 kcs_bmc->ioreg = ast_kcs_bmc_ioregs[channel - 1];
598 kcs_bmc->ops = &aspeed_kcs_ops;
600 priv->map = syscon_node_to_regmap(pdev->dev.parent->of_node);
601 if (IS_ERR(priv->map)) {
602 dev_err(&pdev->dev, "Couldn't get regmap\n");
606 spin_lock_init(&priv->obe.lock);
607 priv->obe.remove = false;
608 timer_setup(&priv->obe.timer, aspeed_kcs_check_obe, 0);
610 rc = aspeed_kcs_set_address(kcs_bmc, addrs, nr_addrs);
614 /* Host to BMC IRQ */
615 rc = aspeed_kcs_config_downstream_irq(kcs_bmc, pdev);
619 /* BMC to Host IRQ */
620 if (have_upstream_irq) {
621 rc = aspeed_kcs_config_upstream_irq(priv, upstream_irq[0], upstream_irq[1]);
625 priv->upstream_irq.mode = aspeed_kcs_irq_none;
628 platform_set_drvdata(pdev, priv);
630 aspeed_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0);
631 aspeed_kcs_enable_channel(kcs_bmc, true);
633 rc = kcs_bmc_add_device(&priv->kcs_bmc);
635 dev_warn(&pdev->dev, "Failed to register channel %d: %d\n", kcs_bmc->channel, rc);
639 dev_info(&pdev->dev, "Initialised channel %d at 0x%x\n",
640 kcs_bmc->channel, addrs[0]);
645 static int aspeed_kcs_remove(struct platform_device *pdev)
647 struct aspeed_kcs_bmc *priv = platform_get_drvdata(pdev);
648 struct kcs_bmc_device *kcs_bmc = &priv->kcs_bmc;
650 kcs_bmc_remove_device(kcs_bmc);
652 aspeed_kcs_enable_channel(kcs_bmc, false);
653 aspeed_kcs_irq_mask_update(kcs_bmc, (KCS_BMC_EVENT_TYPE_IBF | KCS_BMC_EVENT_TYPE_OBE), 0);
655 /* Make sure it's proper dead */
656 spin_lock_irq(&priv->obe.lock);
657 priv->obe.remove = true;
658 spin_unlock_irq(&priv->obe.lock);
659 del_timer_sync(&priv->obe.timer);
664 static const struct of_device_id ast_kcs_bmc_match[] = {
665 { .compatible = "aspeed,ast2400-kcs-bmc-v2" },
666 { .compatible = "aspeed,ast2500-kcs-bmc-v2" },
667 { .compatible = "aspeed,ast2600-kcs-bmc" },
670 MODULE_DEVICE_TABLE(of, ast_kcs_bmc_match);
672 static struct platform_driver ast_kcs_bmc_driver = {
675 .of_match_table = ast_kcs_bmc_match,
677 .probe = aspeed_kcs_probe,
678 .remove = aspeed_kcs_remove,
680 module_platform_driver(ast_kcs_bmc_driver);
682 MODULE_LICENSE("GPL v2");
685 MODULE_DESCRIPTION("Aspeed device interface to the KCS BMC device");