1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
3 // Copyright (c) 2017-20 Linaro Limited.
6 #include <linux/completion.h>
9 #include <linux/interrupt.h>
10 #include <linux/module.h>
12 #include <linux/platform_device.h>
13 #include <linux/pm_runtime.h>
/*
 * CCI register map — offsets relative to cci->base.
 * Mm(m) macros address per-master register banks (0x100 stride);
 * Mm_Qn(m, n) macros address per-master, per-queue banks
 * (0x200 master stride, 0x100 queue stride).
 */
15 #define CCI_HW_VERSION 0x0
/* Soft reset: full MASK resets the whole controller (see cci_reset()),
 * M0/M1 masks reset a single master (written from the ISR after halt ack). */
16 #define CCI_RESET_CMD 0x004
17 #define CCI_RESET_CMD_MASK 0x0f73f3f7
18 #define CCI_RESET_CMD_M0_MASK 0x000003f1
19 #define CCI_RESET_CMD_M1_MASK 0x0003f001
/* One bit per (master, queue) pair; see cci_run_queue(). */
20 #define CCI_QUEUE_START 0x008
/* Halt request, one bit per master; written by cci_halt() and the ISR
 * on transfer errors. */
21 #define CCI_HALT_REQ 0x034
22 #define CCI_HALT_REQ_I2C_M0_Q0Q1 BIT(0)
23 #define CCI_HALT_REQ_I2C_M1_Q0Q1 BIT(1)
/* Per-master I2C timing registers, programmed in cci_init() from hw_params. */
25 #define CCI_I2C_Mm_SCL_CTL(m) (0x100 + 0x100 * (m))
26 #define CCI_I2C_Mm_SDA_CTL_0(m) (0x104 + 0x100 * (m))
27 #define CCI_I2C_Mm_SDA_CTL_1(m) (0x108 + 0x100 * (m))
28 #define CCI_I2C_Mm_SDA_CTL_2(m) (0x10c + 0x100 * (m))
29 #define CCI_I2C_Mm_MISC_CTL(m) (0x110 + 0x100 * (m))
/* Read FIFO: data word register and current fill level (in 32-bit words). */
31 #define CCI_I2C_Mm_READ_DATA(m) (0x118 + 0x100 * (m))
32 #define CCI_I2C_Mm_READ_BUF_LEVEL(m) (0x11c + 0x100 * (m))
/* Command-queue word counters and load port; commands are pushed through
 * LOAD_DATA and executed once EXEC_WORD_CNT is written (cci_run_queue()). */
33 #define CCI_I2C_Mm_Qn_EXEC_WORD_CNT(m, n) (0x300 + 0x200 * (m) + 0x100 * (n))
34 #define CCI_I2C_Mm_Qn_CUR_WORD_CNT(m, n) (0x304 + 0x200 * (m) + 0x100 * (n))
35 #define CCI_I2C_Mm_Qn_CUR_CMD(m, n) (0x308 + 0x200 * (m) + 0x100 * (n))
36 #define CCI_I2C_Mm_Qn_REPORT_STATUS(m, n) (0x30c + 0x200 * (m) + 0x100 * (n))
37 #define CCI_I2C_Mm_Qn_LOAD_DATA(m, n) (0x310 + 0x200 * (m) + 0x100 * (n))
/* Interrupt control: mask, clear and status share the same bit layout. */
39 #define CCI_IRQ_GLOBAL_CLEAR_CMD 0xc00
40 #define CCI_IRQ_MASK_0 0xc04
41 #define CCI_IRQ_MASK_0_I2C_M0_RD_DONE BIT(0)
42 #define CCI_IRQ_MASK_0_I2C_M0_Q0_REPORT BIT(4)
43 #define CCI_IRQ_MASK_0_I2C_M0_Q1_REPORT BIT(8)
44 #define CCI_IRQ_MASK_0_I2C_M1_RD_DONE BIT(12)
45 #define CCI_IRQ_MASK_0_I2C_M1_Q0_REPORT BIT(16)
46 #define CCI_IRQ_MASK_0_I2C_M1_Q1_REPORT BIT(20)
47 #define CCI_IRQ_MASK_0_RST_DONE_ACK BIT(24)
48 #define CCI_IRQ_MASK_0_I2C_M0_Q0Q1_HALT_ACK BIT(25)
49 #define CCI_IRQ_MASK_0_I2C_M1_Q0Q1_HALT_ACK BIT(26)
/* Aggregate error masks — multi-bit, hence plain hex rather than BIT(). */
50 #define CCI_IRQ_MASK_0_I2C_M0_ERROR 0x18000ee6
51 #define CCI_IRQ_MASK_0_I2C_M1_ERROR 0x60ee6000
52 #define CCI_IRQ_CLEAR_0 0xc08
53 #define CCI_IRQ_STATUS_0 0xc0c
54 #define CCI_IRQ_STATUS_0_I2C_M0_RD_DONE BIT(0)
55 #define CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT BIT(4)
56 #define CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT BIT(8)
57 #define CCI_IRQ_STATUS_0_I2C_M1_RD_DONE BIT(12)
58 #define CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT BIT(16)
59 #define CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT BIT(20)
60 #define CCI_IRQ_STATUS_0_RST_DONE_ACK BIT(24)
61 #define CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK BIT(25)
62 #define CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK BIT(26)
63 #define CCI_IRQ_STATUS_0_I2C_M0_Q0_NACK_ERR BIT(27)
64 #define CCI_IRQ_STATUS_0_I2C_M0_Q1_NACK_ERR BIT(28)
65 #define CCI_IRQ_STATUS_0_I2C_M1_Q0_NACK_ERR BIT(29)
66 #define CCI_IRQ_STATUS_0_I2C_M1_Q1_NACK_ERR BIT(30)
67 #define CCI_IRQ_STATUS_0_I2C_M0_ERROR 0x18000ee6
68 #define CCI_IRQ_STATUS_0_I2C_M1_ERROR 0x60ee6000
/* Wait budget for reset / halt / queue-completion interrupts. */
70 #define CCI_TIMEOUT (msecs_to_jiffies(100))
74 /* Max number of resources + 1 for a NULL terminator */
/* Queue command opcodes, encoded in the low bits of a LOAD_DATA word;
 * the payload (slave address, length) is shifted left by 4. */
77 #define CCI_I2C_SET_PARAM 1
78 #define CCI_I2C_REPORT 8
79 #define CCI_I2C_WRITE 9
80 #define CCI_I2C_READ 10
/* Raise an IRQ when the REPORT command is consumed by the queue engine. */
82 #define CCI_I2C_REPORT_IRQ_EN BIT(8)
/*
 * NOTE(review): this listing is elided — the fragments below belong to
 * several type definitions (enum cci_i2c_queue_t, struct hw_params,
 * struct cci_master, struct cci_adapter_quirks holder, struct cci_data,
 * struct cci) whose headers/closers are not visible here. Verify against
 * the full file before editing.
 */
90 enum cci_i2c_queue_t {
/* I2C bus timing parameters, in units of cci_clk ticks (programmed
 * into the Mm_SCL/SDA_CTL registers by cci_init()). */
96 u16 thigh; /* HIGH period of the SCL clock in clock ticks */
97 u16 tlow; /* LOW period of the SCL clock */
98 u16 tsu_sto; /* set-up time for STOP condition */
99 u16 tsu_sta; /* set-up time for a repeated START condition */
100 u16 thd_dat; /* data hold time */
101 u16 thd_sta; /* hold time (repeated) START condition */
102 u16 tbuf; /* bus free time between a STOP and START condition */
105 u16 tsp; /* pulse width of spikes suppressed by the input filter */
/* Per-master state: one I2C adapter and a completion the ISR signals. */
111 struct i2c_adapter adap;
115 struct completion irq_complete;
/* Per-SoC match data: master count, adapter quirks, queue depths,
 * expected cci clock rate and timing parameter sets per I2C mode. */
120 unsigned int num_masters;
121 struct i2c_adapter_quirks quirks;
122 u16 queue_size[NUM_QUEUES];
123 unsigned long cci_clk_rate;
124 struct hw_params params[3];
/* Controller state: match data, bulk clocks and the master array. */
131 const struct cci_data *data;
132 struct clk_bulk_data *clocks;
134 struct cci_master master[NUM_MASTERS];
/*
 * Interrupt handler: read and acknowledge the IRQ status word, then
 * signal whichever waiters the set bits correspond to. On transfer
 * errors it records a status code and requests a queue halt; the
 * subsequent halt-ack interrupt triggers a per-master reset.
 */
137 static irqreturn_t cci_isr(int irq, void *dev)
139 struct cci *cci = dev;
/* Ack everything we are about to handle, then pulse the global clear. */
143 val = readl(cci->base + CCI_IRQ_STATUS_0);
144 writel(val, cci->base + CCI_IRQ_CLEAR_0);
145 writel(0x1, cci->base + CCI_IRQ_GLOBAL_CLEAR_CMD);
/* Reset done: wake master 0 (always the reset waiter, see cci_reset())
 * and master 1 if it exists. */
147 if (val & CCI_IRQ_STATUS_0_RST_DONE_ACK) {
148 complete(&cci->master[0].irq_complete);
149 if (cci->master[1].master)
150 complete(&cci->master[1].irq_complete);
/* Master 0 finished a read or consumed a REPORT command: success. */
154 if (val & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE ||
155 val & CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT ||
156 val & CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT) {
157 cci->master[0].status = 0;
158 complete(&cci->master[0].irq_complete);
/* Same for master 1. */
162 if (val & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE ||
163 val & CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT ||
164 val & CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT) {
165 cci->master[1].status = 0;
166 complete(&cci->master[1].irq_complete);
/* Halt acknowledged: reset the halted master's engine. */
170 if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK)) {
171 reset = CCI_RESET_CMD_M0_MASK;
175 if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK)) {
176 reset = CCI_RESET_CMD_M1_MASK;
181 writel(reset, cci->base + CCI_RESET_CMD);
/* Master 0 error: NACK maps to -ENXIO, anything else to -EIO,
 * then request a halt so the engine can be reset. */
183 if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M0_ERROR)) {
184 if (val & CCI_IRQ_STATUS_0_I2C_M0_Q0_NACK_ERR ||
185 val & CCI_IRQ_STATUS_0_I2C_M0_Q1_NACK_ERR)
186 cci->master[0].status = -ENXIO;
188 cci->master[0].status = -EIO;
190 writel(CCI_HALT_REQ_I2C_M0_Q0Q1, cci->base + CCI_HALT_REQ);
/* Master 1 error, same policy. */
194 if (unlikely(val & CCI_IRQ_STATUS_0_I2C_M1_ERROR)) {
195 if (val & CCI_IRQ_STATUS_0_I2C_M1_Q0_NACK_ERR ||
196 val & CCI_IRQ_STATUS_0_I2C_M1_Q1_NACK_ERR)
197 cci->master[1].status = -ENXIO;
199 cci->master[1].status = -EIO;
201 writel(CCI_HALT_REQ_I2C_M1_Q0Q1, cci->base + CCI_HALT_REQ);
/*
 * Request a halt of one master's queues and wait (up to CCI_TIMEOUT)
 * for the ISR to signal the halt acknowledgement.
 * Returns 0 on success or a negative errno (invalid index / timeout —
 * exact return lines elided from this listing).
 */
208 static int cci_halt(struct cci *cci, u8 master_num)
210 struct cci_master *master;
213 if (master_num >= cci->data->num_masters) {
214 dev_err(cci->dev, "Unsupported master idx (%u)\n", master_num);
/* HALT_REQ uses one bit per master (BIT(0)/BIT(1)). */
218 val = BIT(master_num);
219 master = &cci->master[master_num];
221 reinit_completion(&master->irq_complete);
222 writel(val, cci->base + CCI_HALT_REQ);
224 if (!wait_for_completion_timeout(&master->irq_complete, CCI_TIMEOUT)) {
225 dev_err(cci->dev, "CCI halt timeout\n");
/*
 * Reset the whole controller and wait for the RST_DONE_ACK interrupt.
 * The ISR always completes master[0] on reset done, so master[0]'s
 * completion is used as the wait object regardless of master count.
 */
232 static int cci_reset(struct cci *cci)
235 * we reset the whole controller, here and for implicity use
236 * master[0].xxx for waiting on it.
238 reinit_completion(&cci->master[0].irq_complete);
239 writel(CCI_RESET_CMD_MASK, cci->base + CCI_RESET_CMD);
241 if (!wait_for_completion_timeout(&cci->master[0].irq_complete,
243 dev_err(cci->dev, "CCI reset timeout\n");
/*
 * One-time controller setup: unmask all interrupts the driver handles,
 * then program each active master's I2C bus timing registers from the
 * hw_params table matching its configured speed mode.
 */
250 static int cci_init(struct cci *cci)
252 u32 val = CCI_IRQ_MASK_0_I2C_M0_RD_DONE |
253 CCI_IRQ_MASK_0_I2C_M0_Q0_REPORT |
254 CCI_IRQ_MASK_0_I2C_M0_Q1_REPORT |
255 CCI_IRQ_MASK_0_I2C_M1_RD_DONE |
256 CCI_IRQ_MASK_0_I2C_M1_Q0_REPORT |
257 CCI_IRQ_MASK_0_I2C_M1_Q1_REPORT |
258 CCI_IRQ_MASK_0_RST_DONE_ACK |
259 CCI_IRQ_MASK_0_I2C_M0_Q0Q1_HALT_ACK |
260 CCI_IRQ_MASK_0_I2C_M1_Q0Q1_HALT_ACK |
261 CCI_IRQ_MASK_0_I2C_M0_ERROR |
262 CCI_IRQ_MASK_0_I2C_M1_ERROR;
265 writel(val, cci->base + CCI_IRQ_MASK_0);
267 for (i = 0; i < cci->data->num_masters; i++) {
268 int mode = cci->master[i].mode;
269 const struct hw_params *hw;
/* Skip masters that were not declared in the devicetree
 * (cci back-pointer set only in probe's child loop). */
271 if (!cci->master[i].cci)
274 hw = &cci->data->params[mode];
/* Timing registers pack two 16-bit fields per 32-bit word. */
276 val = hw->thigh << 16 | hw->tlow;
277 writel(val, cci->base + CCI_I2C_Mm_SCL_CTL(i));
279 val = hw->tsu_sto << 16 | hw->tsu_sta;
280 writel(val, cci->base + CCI_I2C_Mm_SDA_CTL_0(i));
282 val = hw->thd_dat << 16 | hw->thd_sta;
283 writel(val, cci->base + CCI_I2C_Mm_SDA_CTL_1(i));
/* NOTE(review): the assignment feeding SDA_CTL_2 is elided here. */
286 writel(val, cci->base + CCI_I2C_Mm_SDA_CTL_2(i));
288 val = hw->scl_stretch_en << 8 | hw->trdhld << 4 | hw->tsp;
289 writel(val, cci->base + CCI_I2C_Mm_MISC_CTL(i));
/*
 * Execute all commands currently loaded in a queue: copy the current
 * word count into the exec count, kick the queue via QUEUE_START, and
 * wait for the ISR to complete (or time out). Returns the status the
 * ISR recorded for this master (0, -ENXIO or -EIO).
 */
295 static int cci_run_queue(struct cci *cci, u8 master, u8 queue)
299 val = readl(cci->base + CCI_I2C_Mm_Qn_CUR_WORD_CNT(master, queue));
300 writel(val, cci->base + CCI_I2C_Mm_Qn_EXEC_WORD_CNT(master, queue));
302 reinit_completion(&cci->master[master].irq_complete);
/* QUEUE_START bit layout: two queues per master. */
303 val = BIT(master * 2 + queue);
304 writel(val, cci->base + CCI_QUEUE_START);
306 if (!wait_for_completion_timeout(&cci->master[master].irq_complete,
308 dev_err(cci->dev, "master %d queue %d timeout\n",
315 return cci->master[master].status;
/*
 * Ensure a queue is empty before a new transfer is built. If commands
 * are still pending, flush them by appending a REPORT command (with
 * IRQ enable) and running the queue to completion.
 * NOTE(review): the early-return lines for the "queue full" and
 * "queue already empty" cases are elided from this listing — verify
 * against the full file.
 */
318 static int cci_validate_queue(struct cci *cci, u8 master, u8 queue)
322 val = readl(cci->base + CCI_I2C_Mm_Qn_CUR_WORD_CNT(master, queue));
323 if (val == cci->data->queue_size[queue])
329 val = CCI_I2C_REPORT | CCI_I2C_REPORT_IRQ_EN;
330 writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));
332 return cci_run_queue(cci, master, queue);
/*
 * Perform one I2C read: queue SET_PARAM (7-bit slave address) and a
 * READ command carrying the length, run the queue, then drain the
 * read FIFO into buf. Data arrives as 32-bit words, least-significant
 * byte first.
 */
335 static int cci_i2c_read(struct cci *cci, u16 master,
336 u16 addr, u8 *buf, u16 len)
338 u32 val, words_read, words_exp;
340 int i, index = 0, ret;
344 * Call validate queue to make sure queue is empty before starting.
345 * This is to avoid overflow / underflow of queue.
347 ret = cci_validate_queue(cci, master, queue);
/* Address and length are packed above the 4-bit opcode field. */
351 val = CCI_I2C_SET_PARAM | (addr & 0x7f) << 4;
352 writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));
354 val = CCI_I2C_READ | len << 4;
355 writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));
357 ret = cci_run_queue(cci, master, queue);
/* Sanity-check the FIFO level: len bytes plus framing round up to
 * len / 4 + 1 words. */
361 words_read = readl(cci->base + CCI_I2C_Mm_READ_BUF_LEVEL(master));
362 words_exp = len / 4 + 1;
363 if (words_read != words_exp) {
364 dev_err(cci->dev, "words read = %d, words expected = %d\n",
365 words_read, words_exp);
/* Unpack each 32-bit FIFO word, LS byte first, stopping at len. */
370 val = readl(cci->base + CCI_I2C_Mm_READ_DATA(master));
372 for (i = 0; i < 4 && index < len; i++) {
374 /* The LS byte of this register represents the
375 * first byte read from the slave during a read
381 buf[index++] = (val >> (i * 8)) & 0xff;
383 } while (--words_read);
/*
 * Perform one I2C write: queue SET_PARAM (7-bit slave address), build
 * the WRITE command plus payload bytes in a staging array, pack that
 * array four bytes per 32-bit word into the queue, terminate with a
 * REPORT command (IRQ enabled) and run the queue.
 */
388 static int cci_i2c_write(struct cci *cci, u16 master,
389 u16 addr, u8 *buf, u16 len)
397 * Call validate queue to make sure queue is empty before starting.
398 * This is to avoid overflow / underflow of queue.
400 ret = cci_validate_queue(cci, master, queue);
404 val = CCI_I2C_SET_PARAM | (addr & 0x7f) << 4;
405 writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));
/* WRITE opcode with the byte count in the upper field, followed by
 * the payload bytes copied from buf (copy line elided here). */
407 load[i++] = CCI_I2C_WRITE | len << 4;
409 for (j = 0; j < len; j++)
/* Pack staging bytes little-endian into 32-bit queue words. */
412 for (j = 0; j < i; j += 4) {
414 val |= load[j + 1] << 8;
415 val |= load[j + 2] << 16;
416 val |= load[j + 3] << 24;
417 writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));
/* REPORT raises the completion interrupt once the write is done. */
420 val = CCI_I2C_REPORT | CCI_I2C_REPORT_IRQ_EN;
421 writel(val, cci->base + CCI_I2C_Mm_Qn_LOAD_DATA(master, queue));
423 return cci_run_queue(cci, master, queue);
/*
 * i2c_algorithm.master_xfer: dispatch each message to cci_i2c_read()
 * or cci_i2c_write() on this adapter's master, bracketed by runtime-PM
 * get/put with autosuspend.
 * NOTE(review): the pm_runtime_get_sync() error check and the loop's
 * early-exit on transfer failure are elided from this listing — if
 * get_sync() fails a put is still required; verify against the full file.
 */
426 static int cci_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
428 struct cci_master *cci_master = i2c_get_adapdata(adap);
429 struct cci *cci = cci_master->cci;
432 ret = pm_runtime_get_sync(cci->dev);
436 for (i = 0; i < num; i++) {
437 if (msgs[i].flags & I2C_M_RD)
438 ret = cci_i2c_read(cci, cci_master->master,
439 msgs[i].addr, msgs[i].buf,
442 ret = cci_i2c_write(cci, cci_master->master,
443 msgs[i].addr, msgs[i].buf,
454 pm_runtime_mark_last_busy(cci->dev);
455 pm_runtime_put_autosuspend(cci->dev);
/* i2c_algorithm.functionality: plain I2C with SMBus emulation. */
460 static u32 cci_func(struct i2c_adapter *adap)
462 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
/* Adapter algorithm: transfers via cci_xfer(), capabilities via cci_func(). */
465 static const struct i2c_algorithm cci_algo = {
466 .master_xfer = cci_xfer,
467 .functionality = cci_func,
/* Prepare and enable all bulk clocks acquired in probe. */
470 static int cci_enable_clocks(struct cci *cci)
472 return clk_bulk_prepare_enable(cci->nclocks, cci->clocks);
/* Disable and unprepare all bulk clocks (inverse of cci_enable_clocks()). */
475 static void cci_disable_clocks(struct cci *cci)
477 clk_bulk_disable_unprepare(cci->nclocks, cci->clocks);
/* Runtime-PM suspend: gate the controller clocks. */
480 static int __maybe_unused cci_suspend_runtime(struct device *dev)
482 struct cci *cci = dev_get_drvdata(dev);
484 cci_disable_clocks(cci);
/* Runtime-PM resume: re-enable clocks (hardware re-init lines elided). */
488 static int __maybe_unused cci_resume_runtime(struct device *dev)
490 struct cci *cci = dev_get_drvdata(dev);
493 ret = cci_enable_clocks(cci);
/* System suspend: only gate clocks if runtime-PM has not already done so. */
501 static int __maybe_unused cci_suspend(struct device *dev)
503 if (!pm_runtime_suspended(dev))
504 return cci_suspend_runtime(dev);
/* System resume: bring clocks back, then re-arm the autosuspend timer. */
509 static int __maybe_unused cci_resume(struct device *dev)
511 cci_resume_runtime(dev);
512 pm_runtime_mark_last_busy(dev);
513 pm_request_autosuspend(dev);
/* PM ops: system sleep and runtime callbacks defined above. */
518 static const struct dev_pm_ops qcom_cci_pm = {
519 SET_SYSTEM_SLEEP_PM_OPS(cci_suspend, cci_resume)
520 SET_RUNTIME_PM_OPS(cci_suspend_runtime, cci_resume_runtime, NULL)
/*
 * Probe: parse per-master devicetree children, map registers, acquire
 * and verify clocks, install the ISR, reset/init the controller,
 * register one I2C adapter per declared master and enable runtime PM
 * with autosuspend. Error-unwind labels are elided from this listing.
 */
523 static int cci_probe(struct platform_device *pdev)
525 struct device *dev = &pdev->dev;
526 unsigned long cci_clk_rate = 0;
527 struct device_node *child;
533 cci = devm_kzalloc(dev, sizeof(*cci), GFP_KERNEL);
538 platform_set_drvdata(pdev, cci);
539 cci->data = device_get_match_data(dev);
/* Each child node describes one bus master; "reg" selects its index.
 * NOTE(review): error exits from this loop must of_node_put(child) —
 * those lines are not visible here, verify. */
543 for_each_available_child_of_node(dev->of_node, child) {
546 ret = of_property_read_u32(child, "reg", &idx);
548 dev_err(dev, "%pOF invalid 'reg' property", child);
552 if (idx >= cci->data->num_masters) {
553 dev_err(dev, "%pOF invalid 'reg' value: %u (max is %u)",
554 child, idx, cci->data->num_masters - 1);
558 cci->master[idx].adap.quirks = &cci->data->quirks;
559 cci->master[idx].adap.algo = &cci_algo;
560 cci->master[idx].adap.dev.parent = dev;
561 cci->master[idx].adap.dev.of_node = child;
562 cci->master[idx].master = idx;
/* The cci back-pointer doubles as the "master is declared" flag
 * tested by cci_init(), the unwind loop and cci_remove(). */
563 cci->master[idx].cci = cci;
565 i2c_set_adapdata(&cci->master[idx].adap, &cci->master[idx]);
566 snprintf(cci->master[idx].adap.name,
567 sizeof(cci->master[idx].adap.name), "Qualcomm-CCI");
/* Speed mode from optional "clock-frequency"; default standard. */
569 cci->master[idx].mode = I2C_MODE_STANDARD;
570 ret = of_property_read_u32(child, "clock-frequency", &val);
572 if (val == I2C_MAX_FAST_MODE_FREQ)
573 cci->master[idx].mode = I2C_MODE_FAST;
574 else if (val == I2C_MAX_FAST_MODE_PLUS_FREQ)
575 cci->master[idx].mode = I2C_MODE_FAST_PLUS;
578 init_completion(&cci->master[idx].irq_complete);
/* Register space. */
583 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
584 cci->base = devm_ioremap_resource(dev, r);
585 if (IS_ERR(cci->base))
586 return PTR_ERR(cci->base);
/* Clocks. */
590 ret = devm_clk_bulk_get_all(dev, &cci->clocks);
592 dev_err(dev, "failed to get clocks %d\n", ret);
597 /* Retrieve CCI clock rate */
598 for (i = 0; i < cci->nclocks; i++) {
599 if (!strcmp(cci->clocks[i].id, "cci")) {
600 cci_clk_rate = clk_get_rate(cci->clocks[i].clk);
/* Warn (don't fail) on an unexpected rate: timing params assume
 * the rate from match data. */
605 if (cci_clk_rate != cci->data->cci_clk_rate) {
606 /* cci clock set by the bootloader or via assigned clock rate
609 dev_warn(dev, "Found %lu cci clk rate while %lu was expected\n",
610 cci_clk_rate, cci->data->cci_clk_rate);
613 ret = cci_enable_clocks(cci);
/* Interrupt. */
619 ret = platform_get_irq(pdev, 0);
624 ret = devm_request_irq(dev, cci->irq, cci_isr, 0, dev_name(dev), cci);
626 dev_err(dev, "request_irq failed, ret: %d\n", ret);
630 val = readl(cci->base + CCI_HW_VERSION);
631 dev_dbg(dev, "CCI HW version = 0x%08x", val);
633 ret = cci_reset(cci);
/* Register an adapter for every declared master. */
641 for (i = 0; i < cci->data->num_masters; i++) {
642 if (!cci->master[i].cci)
645 ret = i2c_add_adapter(&cci->master[i].adap);
650 pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
651 pm_runtime_use_autosuspend(dev);
652 pm_runtime_set_active(dev);
653 pm_runtime_enable(dev);
/* Unwind after i2c_add_adapter() failure.
 * NOTE(review): this loop starts at the failing index i, so it calls
 * i2c_del_adapter() on the adapter that was never added — later
 * upstream versions start at i - 1; verify before relying on this. */
658 for (; i >= 0; i--) {
659 if (cci->master[i].cci)
660 i2c_del_adapter(&cci->master[i].adap);
663 disable_irq(cci->irq);
665 cci_disable_clocks(cci);
/*
 * Remove: unregister every declared master's adapter, quiesce the IRQ
 * and tear down runtime PM. Clock disable / return lines are elided.
 */
670 static int cci_remove(struct platform_device *pdev)
672 struct cci *cci = platform_get_drvdata(pdev);
675 for (i = 0; i < cci->data->num_masters; i++) {
676 if (cci->master[i].cci)
677 i2c_del_adapter(&cci->master[i].adap);
681 disable_irq(cci->irq);
682 pm_runtime_disable(&pdev->dev);
683 pm_runtime_set_suspended(&pdev->dev);
/* Match data for v1 controllers (msm8916): 19.2 MHz cci clock,
 * standard and fast mode timing sets (field values elided). */
688 static const struct cci_data cci_v1_data = {
690 .queue_size = { 64, 16 },
695 .cci_clk_rate = 19200000,
696 .params[I2C_MODE_STANDARD] = {
708 .params[I2C_MODE_FAST] = {
/* Match data for v2 controllers (msm8996/sdm845/sm8250): 37.5 MHz cci
 * clock, adds a fast-plus timing set (field values elided). */
722 static const struct cci_data cci_v2_data = {
724 .queue_size = { 64, 16 },
729 .cci_clk_rate = 37500000,
730 .params[I2C_MODE_STANDARD] = {
742 .params[I2C_MODE_FAST] = {
754 .params[I2C_MODE_FAST_PLUS] = {
/* Devicetree compatibles mapped to their per-SoC match data. */
768 static const struct of_device_id cci_dt_match[] = {
769 { .compatible = "qcom,msm8916-cci", .data = &cci_v1_data},
770 { .compatible = "qcom,msm8996-cci", .data = &cci_v2_data},
771 { .compatible = "qcom,sdm845-cci", .data = &cci_v2_data},
772 { .compatible = "qcom,sm8250-cci", .data = &cci_v2_data},
775 MODULE_DEVICE_TABLE(of, cci_dt_match);
/* Platform driver registration and module metadata. */
777 static struct platform_driver qcom_cci_driver = {
779 .remove = cci_remove,
781 .name = "i2c-qcom-cci",
782 .of_match_table = cci_dt_match,
787 module_platform_driver(qcom_cci_driver);
789 MODULE_DESCRIPTION("Qualcomm Camera Control Interface driver");
792 MODULE_LICENSE("GPL v2");