// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/mailbox_controller.h>
#include <linux/of.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

#define CMDQ_WRITE_ENABLE_MASK	BIT(0)
#define CMDQ_POLL_ENABLE_MASK	BIT(0)
/* dedicate the last GPR, GPR_R15, to hold the register address to be polled */
#define CMDQ_POLL_ADDR_GPR	(15)
#define CMDQ_EOC_IRQ_EN		BIT(0)
#define CMDQ_IMMEDIATE_VALUE	0
#define CMDQ_REG_TYPE		1
#define CMDQ_JUMP_RELATIVE	0
#define CMDQ_JUMP_ABSOLUTE	1

struct cmdq_instruction {
	union {
		u32 value;
		u32 mask;
		struct {
			u16 arg_c;
			u16 src_reg;
		};
	};
	union {
		u16 offset;
		u16 event;
		u16 reg_dst;
	};
	union {
		u8 subsys;
		struct {
			u8 sop:5;
			u8 arg_c_t:1;
			u8 src_t:1;
			u8 dst_t:1;
		};
	};
	u8 op;
};

static inline u8 cmdq_operand_get_type(struct cmdq_operand *op)
{
	return op->reg ? CMDQ_REG_TYPE : CMDQ_IMMEDIATE_VALUE;
}

static inline u16 cmdq_operand_get_idx_value(struct cmdq_operand *op)
{
	return op->reg ? op->idx : op->value;
}

int cmdq_dev_get_client_reg(struct device *dev,
			    struct cmdq_client_reg *client_reg, int idx)
{
	struct of_phandle_args spec;
	int err;

	if (!client_reg)
		return -ENOENT;

	err = of_parse_phandle_with_fixed_args(dev->of_node,
					       "mediatek,gce-client-reg",
					       3, idx, &spec);
	if (err < 0) {
		dev_err(dev,
			"error %d can't parse gce-client-reg property (%d)",
			err, idx);

		return err;
	}

	client_reg->subsys = (u8)spec.args[0];
	client_reg->offset = (u16)spec.args[1];
	client_reg->size = (u16)spec.args[2];
	of_node_put(spec.np);

	return 0;
}
EXPORT_SYMBOL(cmdq_dev_get_client_reg);
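
/*
 * Usage sketch (illustrative, not part of this file): a client device node
 * references the GCE with a 3-cell specifier <subsys offset size>, which this
 * helper parses. The node, subsys constant and ranges below are hypothetical
 * placeholders.
 *
 *	mmsys: syscon@14000000 {
 *		mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0x3000 0x1000>;
 *	};
 *
 * In the client driver's probe():
 *
 *	struct cmdq_client_reg creg;
 *
 *	err = cmdq_dev_get_client_reg(dev, &creg, 0);
 *	if (err)
 *		return err;
 */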

struct cmdq_client *cmdq_mbox_create(struct device *dev, int index)
{
	struct cmdq_client *client;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->client.dev = dev;
	client->client.tx_block = false;
	client->client.knows_txdone = true;
	client->chan = mbox_request_channel(&client->client, index);

	if (IS_ERR(client->chan)) {
		long err;

		dev_err(dev, "failed to request channel\n");
		err = PTR_ERR(client->chan);
		kfree(client);

		return ERR_PTR(err);
	}

	return client;
}
EXPORT_SYMBOL(cmdq_mbox_create);
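
/*
 * Usage sketch: request a GCE channel in probe() and release it in remove().
 * The index refers to an entry in the client's "mboxes" DT property; index 0
 * here is an assumption for illustration.
 *
 *	struct cmdq_client *cl;
 *
 *	cl = cmdq_mbox_create(dev, 0);
 *	if (IS_ERR(cl))
 *		return PTR_ERR(cl);
 *	...
 *	cmdq_mbox_destroy(cl);
 */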

void cmdq_mbox_destroy(struct cmdq_client *client)
{
	mbox_free_channel(client->chan);
	kfree(client);
}
EXPORT_SYMBOL(cmdq_mbox_destroy);

int cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt, size_t size)
{
	struct device *dev;
	dma_addr_t dma_addr;

	pkt->va_base = kzalloc(size, GFP_KERNEL);
	if (!pkt->va_base)
		return -ENOMEM;

	pkt->buf_size = size;

	dev = client->chan->mbox->dev;
	dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma map failed, size=%zu\n", size);
		kfree(pkt->va_base);
		return -ENOMEM;
	}

	pkt->pa_base = dma_addr;

	return 0;
}
EXPORT_SYMBOL(cmdq_pkt_create);
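
/*
 * Usage sketch: allocate a command buffer sized for the expected number of
 * instructions. PAGE_SIZE is an arbitrary illustrative choice; any size large
 * enough for the appended CMDQ_INST_SIZE-sized commands works.
 *
 *	struct cmdq_pkt pkt;
 *
 *	err = cmdq_pkt_create(cl, &pkt, PAGE_SIZE);
 *	if (err)
 *		return err;
 *	...
 *	cmdq_pkt_destroy(cl, &pkt);
 */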

void cmdq_pkt_destroy(struct cmdq_client *client, struct cmdq_pkt *pkt)
{
	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
			 DMA_TO_DEVICE);
	kfree(pkt->va_base);
}
EXPORT_SYMBOL(cmdq_pkt_destroy);

static int cmdq_pkt_append_command(struct cmdq_pkt *pkt,
				   struct cmdq_instruction inst)
{
	struct cmdq_instruction *cmd_ptr;

	if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
		/*
		 * In case the allocated buffer size (pkt->buf_size) is used
		 * up, the real required size (pkt->cmd_buf_size) still
		 * increases, so that the user knows how much memory should
		 * ultimately be allocated after appending all commands and
		 * flushing the command packet. Therefore, the user can call
		 * cmdq_pkt_create() again with the real required buffer size.
		 */
		pkt->cmd_buf_size += CMDQ_INST_SIZE;
		WARN_ONCE(1, "%s: buffer size %zu is too small!\n",
			  __func__, pkt->buf_size);
		return -ENOMEM;
	}

	cmd_ptr = pkt->va_base + pkt->cmd_buf_size;
	*cmd_ptr = inst;
	pkt->cmd_buf_size += CMDQ_INST_SIZE;

	return 0;
}

int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
{
	struct cmdq_instruction inst;

	inst.op = CMDQ_CODE_WRITE;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write);

int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
			u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	u16 offset_mask = offset;
	int err;

	if (mask != 0xffffffff) {
		inst.op = CMDQ_CODE_MASK;
		inst.mask = ~mask;
		err = cmdq_pkt_append_command(pkt, inst);
		if (err < 0)
			return err;

		offset_mask |= CMDQ_WRITE_ENABLE_MASK;
	}
	err = cmdq_pkt_write(pkt, subsys, offset_mask, value);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_write_mask);
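
/*
 * Usage sketch: update only bits [3:0] of a register, leaving the other bits
 * untouched. The subsys ID and offset typically come from a
 * struct cmdq_client_reg parsed from the device tree; the raw values here
 * are placeholders.
 *
 *	err = cmdq_pkt_write_mask(&pkt, creg.subsys, creg.offset + 0x40,
 *				  0x5, GENMASK(3, 0));
 */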

int cmdq_pkt_read_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx, u16 addr_low,
		    u16 reg_idx)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_READ_S;
	inst.dst_t = CMDQ_REG_TYPE;
	inst.sop = high_addr_reg_idx;
	inst.reg_dst = reg_idx;
	inst.src_reg = addr_low;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_read_s);

int cmdq_pkt_write_s(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
		     u16 addr_low, u16 src_reg_idx)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_WRITE_S;
	inst.src_t = CMDQ_REG_TYPE;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.src_reg = src_reg_idx;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s);
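
/*
 * Usage sketch: the _s variants address registers by full physical address,
 * split into a high part (kept in an internal SPR via cmdq_pkt_assign()) and
 * a low 16-bit part. The SPR indexes and reg_pa value are illustrative.
 *
 *	cmdq_pkt_assign(&pkt, CMDQ_THR_SPR_IDX0, CMDQ_ADDR_HIGH(reg_pa));
 *	cmdq_pkt_read_s(&pkt, CMDQ_THR_SPR_IDX0, CMDQ_ADDR_LOW(reg_pa),
 *			CMDQ_THR_SPR_IDX1);
 *	cmdq_pkt_write_s(&pkt, CMDQ_THR_SPR_IDX0, CMDQ_ADDR_LOW(reg_pa),
 *			 CMDQ_THR_SPR_IDX1);
 */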

int cmdq_pkt_write_s_mask(struct cmdq_pkt *pkt, u16 high_addr_reg_idx,
			  u16 addr_low, u16 src_reg_idx, u32 mask)
{
	struct cmdq_instruction inst = {};
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	inst.mask = 0;
	inst.op = CMDQ_CODE_WRITE_S_MASK;
	inst.src_t = CMDQ_REG_TYPE;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.src_reg = src_reg_idx;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s_mask);

int cmdq_pkt_write_s_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
			   u16 addr_low, u32 value)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_WRITE_S;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.value = value;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s_value);

int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx,
				u16 addr_low, u32 value, u32 mask)
{
	struct cmdq_instruction inst = {};
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	inst.op = CMDQ_CODE_WRITE_S_MASK;
	inst.sop = high_addr_reg_idx;
	inst.offset = addr_low;
	inst.value = value;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write_s_mask_value);

int cmdq_pkt_mem_move(struct cmdq_pkt *pkt, dma_addr_t src_addr, dma_addr_t dst_addr)
{
	const u16 high_addr_reg_idx = CMDQ_THR_SPR_IDX0;
	const u16 value_reg_idx = CMDQ_THR_SPR_IDX1;
	int ret;

	/* read the value at src_addr into value_reg_idx */
	ret = cmdq_pkt_assign(pkt, high_addr_reg_idx, CMDQ_ADDR_HIGH(src_addr));
	if (ret < 0)
		return ret;
	ret = cmdq_pkt_read_s(pkt, high_addr_reg_idx, CMDQ_ADDR_LOW(src_addr), value_reg_idx);
	if (ret < 0)
		return ret;

	/* write the value in value_reg_idx to dst_addr */
	ret = cmdq_pkt_assign(pkt, high_addr_reg_idx, CMDQ_ADDR_HIGH(dst_addr));
	if (ret < 0)
		return ret;

	return cmdq_pkt_write_s(pkt, high_addr_reg_idx, CMDQ_ADDR_LOW(dst_addr), value_reg_idx);
}
EXPORT_SYMBOL(cmdq_pkt_mem_move);
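
/*
 * Usage sketch: copy one u32 between two GCE-visible locations, e.g. to
 * snapshot a hardware counter into a status buffer. counter_pa and status_pa
 * are assumed to be dma_addr_t values the GCE can access.
 *
 *	err = cmdq_pkt_mem_move(&pkt, counter_pa, status_pa);
 */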

int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear)
{
	struct cmdq_instruction inst = { {0} };
	u32 clear_option = clear ? CMDQ_WFE_UPDATE : 0;

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_OPTION | clear_option;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_wfe);
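
/*
 * Usage sketch: stall the GCE thread until a hardware event fires, clearing
 * it afterwards so the next wait sees a fresh event. event_id stands in for
 * a platform-specific event number below CMDQ_MAX_EVENT.
 *
 *	err = cmdq_pkt_wfe(&pkt, event_id, true);
 */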

int cmdq_pkt_acquire_event(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = {};

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_UPDATE | CMDQ_WFE_UPDATE_VALUE | CMDQ_WFE_WAIT;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_acquire_event);

int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = { {0} };

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_UPDATE;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_clear_event);

int cmdq_pkt_set_event(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = {};

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_UPDATE | CMDQ_WFE_UPDATE_VALUE;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_set_event);
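
/*
 * Usage sketch: a software handshake between two packets. One packet clears
 * and later sets the event; another waits on it. event_id is assumed to be a
 * platform-defined software event below CMDQ_MAX_EVENT.
 *
 *	cmdq_pkt_clear_event(&pkt_a, event_id);
 *	...
 *	cmdq_pkt_set_event(&pkt_a, event_id);
 *
 *	cmdq_pkt_wfe(&pkt_b, event_id, true);
 */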

int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
		  u16 offset, u32 value)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_POLL;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;
	err = cmdq_pkt_append_command(pkt, inst);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_poll);

int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
		       u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	offset = offset | CMDQ_POLL_ENABLE_MASK;
	err = cmdq_pkt_poll(pkt, subsys, offset, value);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_poll_mask);
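
/*
 * Usage sketch: busy-wait inside the GCE until a status bit goes high, e.g.
 * a "ready" flag at bit 0. The subsys/offset values are placeholders.
 *
 *	err = cmdq_pkt_poll_mask(&pkt, creg.subsys, creg.offset + 0x10,
 *				 BIT(0), BIT(0));
 */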

int cmdq_pkt_poll_addr(struct cmdq_pkt *pkt, dma_addr_t addr, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	u8 use_mask = 0;
	int ret;

	/*
	 * Append a MASK instruction to set the mask for the following POLL
	 * instruction, and enable the use_mask bit in its offset.
	 */
	if (mask != GENMASK(31, 0)) {
		inst.op = CMDQ_CODE_MASK;
		inst.mask = ~mask;
		ret = cmdq_pkt_append_command(pkt, inst);
		if (ret < 0)
			return ret;
		use_mask = CMDQ_POLL_ENABLE_MASK;
	}

	/*
	 * POLL is a legacy operation in GCE; it supports neither SPR nor
	 * CMDQ_CODE_LOGIC, so cmdq_pkt_assign() cannot be used to keep the
	 * polled register address in an SPR. To poll a register address that
	 * has no subsys ID, move the address into a GPR with CMDQ_CODE_MASK
	 * instead.
	 */
	inst.op = CMDQ_CODE_MASK;
	inst.dst_t = CMDQ_REG_TYPE;
	inst.sop = CMDQ_POLL_ADDR_GPR;
	inst.value = addr;
	ret = cmdq_pkt_append_command(pkt, inst);
	if (ret < 0)
		return ret;

	/* Append the POLL instruction to poll the register address assigned to the GPR above. */
	inst.op = CMDQ_CODE_POLL;
	inst.dst_t = CMDQ_REG_TYPE;
	inst.sop = CMDQ_POLL_ADDR_GPR;
	inst.offset = use_mask;
	inst.value = value;
	ret = cmdq_pkt_append_command(pkt, inst);
	if (ret < 0)
		return ret;

	return 0;
}
EXPORT_SYMBOL(cmdq_pkt_poll_addr);
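
/*
 * Usage sketch: poll a register by physical address when no subsys ID maps
 * to it. Note this occupies the dedicated GPR on the executing thread;
 * reg_pa is an illustrative dma_addr_t.
 *
 *	err = cmdq_pkt_poll_addr(&pkt, reg_pa, BIT(0), BIT(0));
 */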

int cmdq_pkt_logic_command(struct cmdq_pkt *pkt, u16 result_reg_idx,
			   struct cmdq_operand *left_operand,
			   enum cmdq_logic_op s_op,
			   struct cmdq_operand *right_operand)
{
	struct cmdq_instruction inst = { {0} };

	if (!left_operand || !right_operand || s_op >= CMDQ_LOGIC_MAX)
		return -EINVAL;

	inst.op = CMDQ_CODE_LOGIC;
	inst.dst_t = CMDQ_REG_TYPE;
	inst.src_t = cmdq_operand_get_type(left_operand);
	inst.arg_c_t = cmdq_operand_get_type(right_operand);
	inst.sop = s_op;
	inst.reg_dst = result_reg_idx;
	inst.src_reg = cmdq_operand_get_idx_value(left_operand);
	inst.arg_c = cmdq_operand_get_idx_value(right_operand);

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_logic_command);
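
/*
 * Usage sketch: compute SPR1 = SPR0 + 1 inside the GCE. A struct cmdq_operand
 * selects either a register index (.reg = true) or a 16-bit immediate, and
 * CMDQ_LOGIC_ADD is one of the enum cmdq_logic_op values.
 *
 *	struct cmdq_operand lhs = { .reg = true, .idx = CMDQ_THR_SPR_IDX0 };
 *	struct cmdq_operand rhs = { .reg = false, .value = 1 };
 *
 *	err = cmdq_pkt_logic_command(&pkt, CMDQ_THR_SPR_IDX1, &lhs,
 *				     CMDQ_LOGIC_ADD, &rhs);
 */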

int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_LOGIC;
	inst.dst_t = CMDQ_REG_TYPE;
	inst.reg_dst = reg_idx;
	inst.value = value;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_assign);

int cmdq_pkt_jump_abs(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa)
{
	struct cmdq_instruction inst = {};

	inst.op = CMDQ_CODE_JUMP;
	inst.offset = CMDQ_JUMP_ABSOLUTE;
	inst.value = addr >> shift_pa;
	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_jump_abs);

int cmdq_pkt_jump_rel(struct cmdq_pkt *pkt, s32 offset, u8 shift_pa)
{
	struct cmdq_instruction inst = { {0} };

	inst.op = CMDQ_CODE_JUMP;
	inst.value = (u32)offset >> shift_pa;
	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_jump_rel);

int cmdq_pkt_eoc(struct cmdq_pkt *pkt)
{
	struct cmdq_instruction inst = { {0} };

	inst.op = CMDQ_CODE_EOC;
	inst.value = CMDQ_EOC_IRQ_EN;
	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_eoc);

int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	/* insert EOC and generate IRQ for each command iteration */
	inst.op = CMDQ_CODE_EOC;
	inst.value = CMDQ_EOC_IRQ_EN;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	/* JUMP to end */
	inst.op = CMDQ_CODE_JUMP;
	inst.value = CMDQ_JUMP_PASS >>
		cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan);
	err = cmdq_pkt_append_command(pkt, inst);

	return err;
}
EXPORT_SYMBOL(cmdq_pkt_finalize);
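
/*
 * Usage sketch of a complete flush, assuming the mailbox flow used by
 * existing in-tree clients: finalize the packet, sync the buffer for the
 * device, submit it, and signal tx done since knows_txdone is set.
 *
 *	err = cmdq_pkt_finalize(&pkt);
 *	if (err)
 *		return err;
 *
 *	dma_sync_single_for_device(cl->chan->mbox->dev, pkt.pa_base,
 *				   pkt.cmd_buf_size, DMA_TO_DEVICE);
 *	err = mbox_send_message(cl->chan, &pkt);
 *	if (err < 0)
 *		return err;
 *	mbox_client_txdone(cl->chan, 0);
 */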

MODULE_DESCRIPTION("MediaTek Command Queue (CMDQ) driver");
MODULE_LICENSE("GPL v2");