// SPDX-License-Identifier: BSD-3-Clause
/*
 * Copyright (c) 2020, MIPI Alliance, Inc.
 *
 * Author: Nicolas Pitre <[email protected]>
 *
 * Core driver code with main interface to the I3C subsystem.
 */

#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include "hci.h"
#include "ext_caps.h"
#include "cmd.h"
#include "dat.h"


/*
 * Host Controller Capabilities and Operation Registers
 */

#define HCI_VERSION                     0x00    /* HCI Version (in BCD) */

#define HC_CONTROL                      0x04
#define HC_CONTROL_BUS_ENABLE           BIT(31)
#define HC_CONTROL_RESUME               BIT(30)
#define HC_CONTROL_ABORT                BIT(29)
#define HC_CONTROL_HALT_ON_CMD_TIMEOUT  BIT(12)
#define HC_CONTROL_HOT_JOIN_CTRL        BIT(8)  /* Hot-Join ACK/NACK Control */
#define HC_CONTROL_I2C_TARGET_PRESENT   BIT(7)
#define HC_CONTROL_PIO_MODE             BIT(6)  /* DMA/PIO Mode Selector */
#define HC_CONTROL_DATA_BIG_ENDIAN      BIT(4)
#define HC_CONTROL_IBA_INCLUDE          BIT(0)  /* Include I3C Broadcast Address */

#define MASTER_DEVICE_ADDR              0x08    /* Master Device Address */
#define MASTER_DYNAMIC_ADDR_VALID       BIT(31) /* Dynamic Address is Valid */
#define MASTER_DYNAMIC_ADDR(v)          FIELD_PREP(GENMASK(22, 16), v)

#define HC_CAPABILITIES                 0x0c
#define HC_CAP_SG_DC_EN                 BIT(30)
#define HC_CAP_SG_IBI_EN                BIT(29)
#define HC_CAP_SG_CR_EN                 BIT(28)
#define HC_CAP_MAX_DATA_LENGTH          GENMASK(24, 22)
#define HC_CAP_CMD_SIZE                 GENMASK(21, 20)
#define HC_CAP_DIRECT_COMMANDS_EN       BIT(18)
#define HC_CAP_MULTI_LANE_EN            BIT(15)
#define HC_CAP_CMD_CCC_DEFBYTE          BIT(10)
#define HC_CAP_HDR_BT_EN                BIT(8)
#define HC_CAP_HDR_TS_EN                BIT(7)
#define HC_CAP_HDR_DDR_EN               BIT(6)
#define HC_CAP_NON_CURRENT_MASTER_CAP   BIT(5)  /* master handoff capable */
#define HC_CAP_DATA_BYTE_CFG_EN         BIT(4)  /* endian selection possible */
#define HC_CAP_AUTO_COMMAND             BIT(3)
#define HC_CAP_COMBO_COMMAND            BIT(2)

#define RESET_CONTROL                   0x10
#define BUS_RESET                       BIT(31)
#define BUS_RESET_TYPE                  GENMASK(30, 29)
#define IBI_QUEUE_RST                   BIT(5)
#define RX_FIFO_RST                     BIT(4)
#define TX_FIFO_RST                     BIT(3)
#define RESP_QUEUE_RST                  BIT(2)
#define CMD_QUEUE_RST                   BIT(1)
#define SOFT_RST                        BIT(0)  /* Core Reset */

#define PRESENT_STATE                   0x14
#define STATE_CURRENT_MASTER            BIT(2)

#define INTR_STATUS                     0x20
#define INTR_STATUS_ENABLE              0x24
#define INTR_SIGNAL_ENABLE              0x28
#define INTR_FORCE                      0x2c
#define INTR_HC_CMD_SEQ_UFLOW_STAT      BIT(12) /* Cmd Sequence Underflow */
#define INTR_HC_RESET_CANCEL            BIT(11) /* HC Cancelled Reset */
#define INTR_HC_INTERNAL_ERR            BIT(10) /* HC Internal Error */

#define DAT_SECTION                     0x30    /* Device Address Table */
#define DAT_ENTRY_SIZE                  GENMASK(31, 28)
#define DAT_TABLE_SIZE                  GENMASK(18, 12)
#define DAT_TABLE_OFFSET                GENMASK(11, 0)

#define DCT_SECTION                     0x34    /* Device Characteristics Table */
#define DCT_ENTRY_SIZE                  GENMASK(31, 28)
#define DCT_TABLE_INDEX                 GENMASK(23, 19)
#define DCT_TABLE_SIZE                  GENMASK(18, 12)
#define DCT_TABLE_OFFSET                GENMASK(11, 0)

#define RING_HEADERS_SECTION            0x38
#define RING_HEADERS_OFFSET             GENMASK(15, 0)

#define PIO_SECTION                     0x3c
#define PIO_REGS_OFFSET                 GENMASK(15, 0)  /* PIO Offset */

#define EXT_CAPS_SECTION                0x40
#define EXT_CAPS_OFFSET                 GENMASK(15, 0)

#define IBI_NOTIFY_CTRL                 0x58    /* IBI Notify Control */
#define IBI_NOTIFY_SIR_REJECTED         BIT(3)  /* Rejected Target Interrupt Request */
#define IBI_NOTIFY_MR_REJECTED          BIT(1)  /* Rejected Master Request Control */
#define IBI_NOTIFY_HJ_REJECTED          BIT(0)  /* Rejected Hot-Join Control */

#define DEV_CTX_BASE_LO                 0x60
#define DEV_CTX_BASE_HI                 0x64


static inline struct i3c_hci *to_i3c_hci(struct i3c_master_controller *m)
{
        return container_of(m, struct i3c_hci, master);
}

static int i3c_hci_bus_init(struct i3c_master_controller *m)
{
        struct i3c_hci *hci = to_i3c_hci(m);
        struct i3c_device_info info;
        int ret;

        DBG("");

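        /* The Device Address Table is only used with the v1 command descriptor model */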
        if (hci->cmd == &mipi_i3c_hci_cmd_v1) {
                ret = mipi_i3c_hci_dat_v1.init(hci);
                if (ret)
                        return ret;
        }

        ret = i3c_master_get_free_addr(m, 0);
        if (ret < 0)
                return ret;
        reg_write(MASTER_DEVICE_ADDR,
                  MASTER_DYNAMIC_ADDR(ret) | MASTER_DYNAMIC_ADDR_VALID);
        memset(&info, 0, sizeof(info));
        info.dyn_addr = ret;
        ret = i3c_master_set_info(m, &info);
        if (ret)
                return ret;

        ret = hci->io->init(hci);
        if (ret)
                return ret;

        /* RESP_BUF_THLD holds "n": program 0 so a single (n+1 = 1) response triggers */
        if (hci->quirks & HCI_QUIRK_RESP_BUF_THLD)
                amd_set_resp_buf_thld(hci);

        reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
        DBG("HC_CONTROL = %#x", reg_read(HC_CONTROL));

        return 0;
}

static void i3c_hci_bus_cleanup(struct i3c_master_controller *m)
{
        struct i3c_hci *hci = to_i3c_hci(m);
        struct platform_device *pdev = to_platform_device(m->dev.parent);

        DBG("");

        reg_clear(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
        synchronize_irq(platform_get_irq(pdev, 0));
        hci->io->cleanup(hci);
        if (hci->cmd == &mipi_i3c_hci_cmd_v1)
                mipi_i3c_hci_dat_v1.cleanup(hci);
}

void mipi_i3c_hci_resume(struct i3c_hci *hci)
{
        reg_set(HC_CONTROL, HC_CONTROL_RESUME);
}

/* located here rather than pio.c because needed bits are in core reg space */
void mipi_i3c_hci_pio_reset(struct i3c_hci *hci)
{
        reg_write(RESET_CONTROL, RX_FIFO_RST | TX_FIFO_RST | RESP_QUEUE_RST);
}

/* located here rather than dct.c because needed bits are in core reg space */
void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci)
{
        reg_write(DCT_SECTION, FIELD_PREP(DCT_TABLE_INDEX, 0));
}

static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
                                struct i3c_ccc_cmd *ccc)
{
        struct i3c_hci *hci = to_i3c_hci(m);
        struct hci_xfer *xfer;
        bool raw = !!(hci->quirks & HCI_QUIRK_RAW_CCC);
        bool prefixed = raw && !!(ccc->id & I3C_CCC_DIRECT);
        unsigned int nxfers = ccc->ndests + prefixed;
        DECLARE_COMPLETION_ONSTACK(done);
        int i, last, ret = 0;

        DBG("cmd=%#x rnw=%d ndests=%d data[0].len=%d",
            ccc->id, ccc->rnw, ccc->ndests, ccc->dests[0].payload.len);

        xfer = hci_alloc_xfer(nxfers);
        if (!xfer)
                return -ENOMEM;

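        /* With the raw-CCC quirk, a direct CCC is prefixed with a broadcast frame carrying the CCC id */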
        if (prefixed) {
                xfer->data = NULL;
                xfer->data_len = 0;
                xfer->rnw = false;
                hci->cmd->prep_ccc(hci, xfer, I3C_BROADCAST_ADDR,
                                   ccc->id, true);
                xfer++;
        }

        for (i = 0; i < nxfers - prefixed; i++) {
                xfer[i].data = ccc->dests[i].payload.data;
                xfer[i].data_len = ccc->dests[i].payload.len;
                xfer[i].rnw = ccc->rnw;
                ret = hci->cmd->prep_ccc(hci, &xfer[i], ccc->dests[i].addr,
                                         ccc->id, raw);
                if (ret)
                        goto out;
                xfer[i].cmd_desc[0] |= CMD_0_ROC;
        }
        last = i - 1;
        xfer[last].cmd_desc[0] |= CMD_0_TOC;
        xfer[last].completion = &done;

        if (prefixed)
                xfer--;

        ret = hci->io->queue_xfer(hci, xfer, nxfers);
        if (ret)
                goto out;
        if (!wait_for_completion_timeout(&done, HZ) &&
            hci->io->dequeue_xfer(hci, xfer, nxfers)) {
                ret = -ETIME;
                goto out;
        }
        for (i = prefixed; i < nxfers; i++) {
                if (ccc->rnw)
                        ccc->dests[i - prefixed].payload.len =
                                RESP_DATA_LENGTH(xfer[i].response);
                switch (RESP_STATUS(xfer[i].response)) {
                case RESP_SUCCESS:
                        continue;
                case RESP_ERR_ADDR_HEADER:
                case RESP_ERR_NACK:
                        ccc->err = I3C_ERROR_M2;
                        fallthrough;
                default:
                        ret = -EIO;
                        goto out;
                }
        }

        if (ccc->rnw)
                DBG("got: %*ph",
                    ccc->dests[0].payload.len, ccc->dests[0].payload.data);

out:
        hci_free_xfer(xfer, nxfers);
        return ret;
}

static int i3c_hci_daa(struct i3c_master_controller *m)
{
        struct i3c_hci *hci = to_i3c_hci(m);

        DBG("");

        return hci->cmd->perform_daa(hci);
}

static int i3c_hci_alloc_safe_xfer_buf(struct i3c_hci *hci,
                                       struct hci_xfer *xfer)
{
        if (hci->io != &mipi_i3c_hci_dma ||
            xfer->data == NULL || !is_vmalloc_addr(xfer->data))
                return 0;

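        /* Buffers in vmalloc space cannot be DMA-mapped directly: bounce through kmalloc'ed memory */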
        if (xfer->rnw)
                xfer->bounce_buf = kzalloc(xfer->data_len, GFP_KERNEL);
        else
                xfer->bounce_buf = kmemdup(xfer->data,
                                           xfer->data_len, GFP_KERNEL);

        return xfer->bounce_buf == NULL ? -ENOMEM : 0;
}

static void i3c_hci_free_safe_xfer_buf(struct i3c_hci *hci,
                                       struct hci_xfer *xfer)
{
        if (hci->io != &mipi_i3c_hci_dma || xfer->bounce_buf == NULL)
                return;

        if (xfer->rnw)
                memcpy(xfer->data, xfer->bounce_buf, xfer->data_len);

        kfree(xfer->bounce_buf);
}

static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
                              struct i3c_priv_xfer *i3c_xfers,
                              int nxfers)
{
        struct i3c_master_controller *m = i3c_dev_get_master(dev);
        struct i3c_hci *hci = to_i3c_hci(m);
        struct hci_xfer *xfer;
        DECLARE_COMPLETION_ONSTACK(done);
        unsigned int size_limit;
        int i, last, ret = 0;

        DBG("nxfers = %d", nxfers);

        xfer = hci_alloc_xfer(nxfers);
        if (!xfer)
                return -ENOMEM;

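        /* HC_CAP_MAX_DATA_LENGTH encodes the per-transfer size limit as 2^(16 + n) bytes */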
        size_limit = 1U << (16 + FIELD_GET(HC_CAP_MAX_DATA_LENGTH, hci->caps));

        for (i = 0; i < nxfers; i++) {
                xfer[i].data_len = i3c_xfers[i].len;
                ret = -EFBIG;
                if (xfer[i].data_len >= size_limit)
                        goto out;
                xfer[i].rnw = i3c_xfers[i].rnw;
                if (i3c_xfers[i].rnw) {
                        xfer[i].data = i3c_xfers[i].data.in;
                } else {
                        /* silence the const qualifier warning with a cast */
                        xfer[i].data = (void *) i3c_xfers[i].data.out;
                }
                hci->cmd->prep_i3c_xfer(hci, dev, &xfer[i]);
                xfer[i].cmd_desc[0] |= CMD_0_ROC;
                ret = i3c_hci_alloc_safe_xfer_buf(hci, &xfer[i]);
                if (ret)
                        goto out;
        }
        last = i - 1;
        xfer[last].cmd_desc[0] |= CMD_0_TOC;
        xfer[last].completion = &done;

        ret = hci->io->queue_xfer(hci, xfer, nxfers);
        if (ret)
                goto out;
        if (!wait_for_completion_timeout(&done, HZ) &&
            hci->io->dequeue_xfer(hci, xfer, nxfers)) {
                ret = -ETIME;
                goto out;
        }
        for (i = 0; i < nxfers; i++) {
                if (i3c_xfers[i].rnw)
                        i3c_xfers[i].len = RESP_DATA_LENGTH(xfer[i].response);
                if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
                        ret = -EIO;
                        goto out;
                }
        }

out:
        for (i = 0; i < nxfers; i++)
                i3c_hci_free_safe_xfer_buf(hci, &xfer[i]);

        hci_free_xfer(xfer, nxfers);
        return ret;
}

static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
                             const struct i2c_msg *i2c_xfers, int nxfers)
{
        struct i3c_master_controller *m = i2c_dev_get_master(dev);
        struct i3c_hci *hci = to_i3c_hci(m);
        struct hci_xfer *xfer;
        DECLARE_COMPLETION_ONSTACK(done);
        int i, last, ret = 0;

        DBG("nxfers = %d", nxfers);

        xfer = hci_alloc_xfer(nxfers);
        if (!xfer)
                return -ENOMEM;

        for (i = 0; i < nxfers; i++) {
                xfer[i].data = i2c_xfers[i].buf;
                xfer[i].data_len = i2c_xfers[i].len;
                xfer[i].rnw = i2c_xfers[i].flags & I2C_M_RD;
                hci->cmd->prep_i2c_xfer(hci, dev, &xfer[i]);
                xfer[i].cmd_desc[0] |= CMD_0_ROC;
                ret = i3c_hci_alloc_safe_xfer_buf(hci, &xfer[i]);
                if (ret)
                        goto out;
        }
        last = i - 1;
        xfer[last].cmd_desc[0] |= CMD_0_TOC;
        xfer[last].completion = &done;

        ret = hci->io->queue_xfer(hci, xfer, nxfers);
        if (ret)
                goto out;
        if (!wait_for_completion_timeout(&done, HZ) &&
            hci->io->dequeue_xfer(hci, xfer, nxfers)) {
                ret = -ETIME;
                goto out;
        }
        for (i = 0; i < nxfers; i++) {
                if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
                        ret = -EIO;
                        goto out;
                }
        }

out:
        for (i = 0; i < nxfers; i++)
                i3c_hci_free_safe_xfer_buf(hci, &xfer[i]);

        hci_free_xfer(xfer, nxfers);
        return ret;
}

static int i3c_hci_attach_i3c_dev(struct i3c_dev_desc *dev)
{
        struct i3c_master_controller *m = i3c_dev_get_master(dev);
        struct i3c_hci *hci = to_i3c_hci(m);
        struct i3c_hci_dev_data *dev_data;
        int ret;

        DBG("");

        dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
        if (!dev_data)
                return -ENOMEM;
        if (hci->cmd == &mipi_i3c_hci_cmd_v1) {
                ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
                if (ret < 0) {
                        kfree(dev_data);
                        return ret;
                }
                mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, ret,
                                                     dev->info.dyn_addr ?: dev->info.static_addr);
                dev_data->dat_idx = ret;
        }
        i3c_dev_set_master_data(dev, dev_data);
        return 0;
}

static int i3c_hci_reattach_i3c_dev(struct i3c_dev_desc *dev, u8 old_dyn_addr)
{
        struct i3c_master_controller *m = i3c_dev_get_master(dev);
        struct i3c_hci *hci = to_i3c_hci(m);
        struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);

        DBG("");

        if (hci->cmd == &mipi_i3c_hci_cmd_v1)
                mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, dev_data->dat_idx,
                                                     dev->info.dyn_addr);
        return 0;
}

static void i3c_hci_detach_i3c_dev(struct i3c_dev_desc *dev)
{
        struct i3c_master_controller *m = i3c_dev_get_master(dev);
        struct i3c_hci *hci = to_i3c_hci(m);
        struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);

        DBG("");

        i3c_dev_set_master_data(dev, NULL);
        if (hci->cmd == &mipi_i3c_hci_cmd_v1)
                mipi_i3c_hci_dat_v1.free_entry(hci, dev_data->dat_idx);
        kfree(dev_data);
}

static int i3c_hci_attach_i2c_dev(struct i2c_dev_desc *dev)
{
        struct i3c_master_controller *m = i2c_dev_get_master(dev);
        struct i3c_hci *hci = to_i3c_hci(m);
        struct i3c_hci_dev_data *dev_data;
        int ret;

        DBG("");

        if (hci->cmd != &mipi_i3c_hci_cmd_v1)
                return 0;
        dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
        if (!dev_data)
                return -ENOMEM;
        ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
        if (ret < 0) {
                kfree(dev_data);
                return ret;
        }
        mipi_i3c_hci_dat_v1.set_static_addr(hci, ret, dev->addr);
        mipi_i3c_hci_dat_v1.set_flags(hci, ret, DAT_0_I2C_DEVICE, 0);
        dev_data->dat_idx = ret;
        i2c_dev_set_master_data(dev, dev_data);
        return 0;
}

static void i3c_hci_detach_i2c_dev(struct i2c_dev_desc *dev)
{
        struct i3c_master_controller *m = i2c_dev_get_master(dev);
        struct i3c_hci *hci = to_i3c_hci(m);
        struct i3c_hci_dev_data *dev_data = i2c_dev_get_master_data(dev);

        DBG("");

        if (dev_data) {
                i2c_dev_set_master_data(dev, NULL);
                if (hci->cmd == &mipi_i3c_hci_cmd_v1)
                        mipi_i3c_hci_dat_v1.free_entry(hci, dev_data->dat_idx);
                kfree(dev_data);
        }
}

static int i3c_hci_request_ibi(struct i3c_dev_desc *dev,
                               const struct i3c_ibi_setup *req)
{
        struct i3c_master_controller *m = i3c_dev_get_master(dev);
        struct i3c_hci *hci = to_i3c_hci(m);
        struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
        unsigned int dat_idx = dev_data->dat_idx;

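        /* Record in the DAT whether IBIs from this device will carry a data payload */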
        if (req->max_payload_len != 0)
                mipi_i3c_hci_dat_v1.set_flags(hci, dat_idx, DAT_0_IBI_PAYLOAD, 0);
        else
                mipi_i3c_hci_dat_v1.clear_flags(hci, dat_idx, DAT_0_IBI_PAYLOAD, 0);
        return hci->io->request_ibi(hci, dev, req);
}

static void i3c_hci_free_ibi(struct i3c_dev_desc *dev)
{
        struct i3c_master_controller *m = i3c_dev_get_master(dev);
        struct i3c_hci *hci = to_i3c_hci(m);

        hci->io->free_ibi(hci, dev);
}

static int i3c_hci_enable_ibi(struct i3c_dev_desc *dev)
{
        struct i3c_master_controller *m = i3c_dev_get_master(dev);
        struct i3c_hci *hci = to_i3c_hci(m);
        struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);

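        /* Stop auto-rejecting SIRs from this device in the DAT, then enable the event on the target */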
        mipi_i3c_hci_dat_v1.clear_flags(hci, dev_data->dat_idx, DAT_0_SIR_REJECT, 0);
        return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
}

static int i3c_hci_disable_ibi(struct i3c_dev_desc *dev)
{
        struct i3c_master_controller *m = i3c_dev_get_master(dev);
        struct i3c_hci *hci = to_i3c_hci(m);
        struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);

        mipi_i3c_hci_dat_v1.set_flags(hci, dev_data->dat_idx, DAT_0_SIR_REJECT, 0);
        return i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
}

static void i3c_hci_recycle_ibi_slot(struct i3c_dev_desc *dev,
                                     struct i3c_ibi_slot *slot)
{
        struct i3c_master_controller *m = i3c_dev_get_master(dev);
        struct i3c_hci *hci = to_i3c_hci(m);

        hci->io->recycle_ibi_slot(hci, dev, slot);
}

static const struct i3c_master_controller_ops i3c_hci_ops = {
        .bus_init               = i3c_hci_bus_init,
        .bus_cleanup            = i3c_hci_bus_cleanup,
        .do_daa                 = i3c_hci_daa,
        .send_ccc_cmd           = i3c_hci_send_ccc_cmd,
        .priv_xfers             = i3c_hci_priv_xfers,
        .i2c_xfers              = i3c_hci_i2c_xfers,
        .attach_i3c_dev         = i3c_hci_attach_i3c_dev,
        .reattach_i3c_dev       = i3c_hci_reattach_i3c_dev,
        .detach_i3c_dev         = i3c_hci_detach_i3c_dev,
        .attach_i2c_dev         = i3c_hci_attach_i2c_dev,
        .detach_i2c_dev         = i3c_hci_detach_i2c_dev,
        .request_ibi            = i3c_hci_request_ibi,
        .free_ibi               = i3c_hci_free_ibi,
        .enable_ibi             = i3c_hci_enable_ibi,
        .disable_ibi            = i3c_hci_disable_ibi,
        .recycle_ibi_slot       = i3c_hci_recycle_ibi_slot,
};

static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id)
{
        struct i3c_hci *hci = dev_id;
        irqreturn_t result = IRQ_NONE;
        u32 val;

        val = reg_read(INTR_STATUS);
        DBG("INTR_STATUS = %#x", val);

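        /* Acknowledge the status bits we just read by writing them back */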
        if (val)
                reg_write(INTR_STATUS, val);

        if (val & INTR_HC_RESET_CANCEL) {
                DBG("cancelled reset");
                val &= ~INTR_HC_RESET_CANCEL;
        }
        if (val & INTR_HC_INTERNAL_ERR) {
                dev_err(&hci->master.dev, "Host Controller Internal Error\n");
                val &= ~INTR_HC_INTERNAL_ERR;
        }

        hci->io->irq_handler(hci);

        if (val)
                dev_err(&hci->master.dev, "unexpected INTR_STATUS %#x\n", val);
        else
                result = IRQ_HANDLED;

        return result;
}

static int i3c_hci_init(struct i3c_hci *hci)
{
        bool size_in_dwords, mode_selector;
        u32 regval, offset;
        int ret;

        /* Validate HCI hardware version */
        regval = reg_read(HCI_VERSION);
        hci->version_major = (regval >> 8) & 0xf;
        hci->version_minor = (regval >> 4) & 0xf;
        hci->revision = regval & 0xf;
        dev_notice(&hci->master.dev, "MIPI I3C HCI v%u.%u r%02u\n",
                   hci->version_major, hci->version_minor, hci->revision);
        /* known versions */
        switch (regval & ~0xf) {
        case 0x100:     /* version 1.0 */
        case 0x110:     /* version 1.1 */
        case 0x200:     /* version 2.0 */
                break;
        default:
                dev_err(&hci->master.dev, "unsupported HCI version\n");
                return -EPROTONOSUPPORT;
        }

        hci->caps = reg_read(HC_CAPABILITIES);
        DBG("caps = %#x", hci->caps);

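        /* Controllers predating HCI v1.1 report DAT/DCT sizes in DWORDs rather than entries */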
        size_in_dwords = hci->version_major < 1 ||
                         (hci->version_major == 1 && hci->version_minor < 1);

        regval = reg_read(DAT_SECTION);
        offset = FIELD_GET(DAT_TABLE_OFFSET, regval);
        hci->DAT_regs = offset ? hci->base_regs + offset : NULL;
        hci->DAT_entries = FIELD_GET(DAT_TABLE_SIZE, regval);
        hci->DAT_entry_size = FIELD_GET(DAT_ENTRY_SIZE, regval) ? 0 : 8;
        if (size_in_dwords)
                hci->DAT_entries = 4 * hci->DAT_entries / hci->DAT_entry_size;
        dev_info(&hci->master.dev, "DAT: %u %u-bytes entries at offset %#x\n",
                 hci->DAT_entries, hci->DAT_entry_size, offset);

        regval = reg_read(DCT_SECTION);
        offset = FIELD_GET(DCT_TABLE_OFFSET, regval);
        hci->DCT_regs = offset ? hci->base_regs + offset : NULL;
        hci->DCT_entries = FIELD_GET(DCT_TABLE_SIZE, regval);
        hci->DCT_entry_size = FIELD_GET(DCT_ENTRY_SIZE, regval) ? 0 : 16;
        if (size_in_dwords)
                hci->DCT_entries = 4 * hci->DCT_entries / hci->DCT_entry_size;
        dev_info(&hci->master.dev, "DCT: %u %u-bytes entries at offset %#x\n",
                 hci->DCT_entries, hci->DCT_entry_size, offset);

        regval = reg_read(RING_HEADERS_SECTION);
        offset = FIELD_GET(RING_HEADERS_OFFSET, regval);
        hci->RHS_regs = offset ? hci->base_regs + offset : NULL;
        dev_info(&hci->master.dev, "Ring Headers at offset %#x\n", offset);

        regval = reg_read(PIO_SECTION);
        offset = FIELD_GET(PIO_REGS_OFFSET, regval);
        hci->PIO_regs = offset ? hci->base_regs + offset : NULL;
        dev_info(&hci->master.dev, "PIO section at offset %#x\n", offset);

        regval = reg_read(EXT_CAPS_SECTION);
        offset = FIELD_GET(EXT_CAPS_OFFSET, regval);
        hci->EXTCAPS_regs = offset ? hci->base_regs + offset : NULL;
        dev_info(&hci->master.dev, "Extended Caps at offset %#x\n", offset);

        ret = i3c_hci_parse_ext_caps(hci);
        if (ret)
                return ret;

        /*
         * Now let's reset the hardware.
         * SOFT_RST must be clear before we write to it.
         * Then we must wait until it clears again.
         */
        ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
                                 !(regval & SOFT_RST), 1, 10000);
        if (ret)
                return -ENXIO;
        reg_write(RESET_CONTROL, SOFT_RST);
        ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
                                 !(regval & SOFT_RST), 1, 10000);
        if (ret)
                return -ENXIO;

        /* Disable all interrupts and allow all signal updates */
        reg_write(INTR_SIGNAL_ENABLE, 0x0);
        reg_write(INTR_STATUS_ENABLE, 0xffffffff);

        /* Make sure our data ordering fits the host's */
        regval = reg_read(HC_CONTROL);
        if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
                if (!(regval & HC_CONTROL_DATA_BIG_ENDIAN)) {
                        regval |= HC_CONTROL_DATA_BIG_ENDIAN;
                        reg_write(HC_CONTROL, regval);
                        regval = reg_read(HC_CONTROL);
                        if (!(regval & HC_CONTROL_DATA_BIG_ENDIAN)) {
                                dev_err(&hci->master.dev, "cannot set BE mode\n");
                                return -EOPNOTSUPP;
                        }
                }
        } else {
                if (regval & HC_CONTROL_DATA_BIG_ENDIAN) {
                        regval &= ~HC_CONTROL_DATA_BIG_ENDIAN;
                        reg_write(HC_CONTROL, regval);
                        regval = reg_read(HC_CONTROL);
                        if (regval & HC_CONTROL_DATA_BIG_ENDIAN) {
                                dev_err(&hci->master.dev, "cannot clear BE mode\n");
                                return -EOPNOTSUPP;
                        }
                }
        }

        /* Select our command descriptor model */
        switch (FIELD_GET(HC_CAP_CMD_SIZE, hci->caps)) {
        case 0:
                hci->cmd = &mipi_i3c_hci_cmd_v1;
                break;
        case 1:
                hci->cmd = &mipi_i3c_hci_cmd_v2;
                break;
        default:
                dev_err(&hci->master.dev, "wrong CMD_SIZE capability value\n");
                return -EINVAL;
        }

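        /* From v1.1 on, the DMA/PIO selection can be verified by reading back the HC_CONTROL mode bit */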
        mode_selector = hci->version_major > 1 ||
                        (hci->version_major == 1 && hci->version_minor > 0);

        /* Quirk for HCI_QUIRK_PIO_MODE on AMD platforms */
        if (hci->quirks & HCI_QUIRK_PIO_MODE)
                hci->RHS_regs = NULL;

        /* Try activating DMA operations first */
        if (hci->RHS_regs) {
                reg_clear(HC_CONTROL, HC_CONTROL_PIO_MODE);
                if (mode_selector && (reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE)) {
                        dev_err(&hci->master.dev, "PIO mode is stuck\n");
                        ret = -EIO;
                } else {
                        hci->io = &mipi_i3c_hci_dma;
                        dev_info(&hci->master.dev, "Using DMA\n");
                }
        }

        /* If no DMA, try PIO */
        if (!hci->io && hci->PIO_regs) {
                reg_set(HC_CONTROL, HC_CONTROL_PIO_MODE);
                if (mode_selector && !(reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE)) {
                        dev_err(&hci->master.dev, "DMA mode is stuck\n");
                        ret = -EIO;
                } else {
                        hci->io = &mipi_i3c_hci_pio;
                        dev_info(&hci->master.dev, "Using PIO\n");
                }
        }

        if (!hci->io) {
                dev_err(&hci->master.dev, "neither DMA nor PIO can be used\n");
                if (!ret)
                        ret = -EINVAL;
                return ret;
        }

        /* Configure OD and PP timings for AMD platforms */
        if (hci->quirks & HCI_QUIRK_OD_PP_TIMING)
                amd_set_od_pp_timing(hci);

        return 0;
}

static int i3c_hci_probe(struct platform_device *pdev)
{
        struct i3c_hci *hci;
        int irq, ret;

        hci = devm_kzalloc(&pdev->dev, sizeof(*hci), GFP_KERNEL);
        if (!hci)
                return -ENOMEM;
        hci->base_regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(hci->base_regs))
                return PTR_ERR(hci->base_regs);

        platform_set_drvdata(pdev, hci);
        /* temporary for dev_printk's, to be replaced in i3c_master_register */
        hci->master.dev.init_name = dev_name(&pdev->dev);

        hci->quirks = (unsigned long)device_get_match_data(&pdev->dev);

        ret = i3c_hci_init(hci);
        if (ret)
                return ret;

        irq = platform_get_irq(pdev, 0);
        ret = devm_request_irq(&pdev->dev, irq, i3c_hci_irq_handler,
                               0, NULL, hci);
        if (ret)
                return ret;

        ret = i3c_master_register(&hci->master, &pdev->dev,
                                  &i3c_hci_ops, false);
        if (ret)
                return ret;

        return 0;
}

static void i3c_hci_remove(struct platform_device *pdev)
{
        struct i3c_hci *hci = platform_get_drvdata(pdev);

        i3c_master_unregister(&hci->master);
}

static const __maybe_unused struct of_device_id i3c_hci_of_match[] = {
        { .compatible = "mipi-i3c-hci", },
        {},
};
MODULE_DEVICE_TABLE(of, i3c_hci_of_match);

static const struct acpi_device_id i3c_hci_acpi_match[] = {
        { "AMDI5017", HCI_QUIRK_PIO_MODE | HCI_QUIRK_OD_PP_TIMING | HCI_QUIRK_RESP_BUF_THLD },
        {}
};
MODULE_DEVICE_TABLE(acpi, i3c_hci_acpi_match);

static struct platform_driver i3c_hci_driver = {
        .probe = i3c_hci_probe,
        .remove = i3c_hci_remove,
        .driver = {
                .name = "mipi-i3c-hci",
                .of_match_table = of_match_ptr(i3c_hci_of_match),
                .acpi_match_table = i3c_hci_acpi_match,
        },
};
module_platform_driver(i3c_hci_driver);
MODULE_ALIAS("platform:mipi-i3c-hci");

MODULE_AUTHOR("Nicolas Pitre <[email protected]>");
MODULE_DESCRIPTION("MIPI I3C HCI driver");
MODULE_LICENSE("Dual BSD/GPL");