// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */

#include "idpf_controlq.h"
/**
 * idpf_ctlq_setup_regs - initialize control queue registers
 * @cq: pointer to the specific control queue
 * @q_create_info: struct containing creation info for the queue
 */
static void idpf_ctlq_setup_regs(struct idpf_ctlq_info *cq,
                                 struct idpf_ctlq_create_info *q_create_info)
{
        /* set control queue registers in our local struct */
        cq->reg.head = q_create_info->reg.head;
        cq->reg.tail = q_create_info->reg.tail;
        cq->reg.len = q_create_info->reg.len;
        cq->reg.bah = q_create_info->reg.bah;
        cq->reg.bal = q_create_info->reg.bal;
        cq->reg.len_mask = q_create_info->reg.len_mask;
        cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;
        cq->reg.head_mask = q_create_info->reg.head_mask;
}
/**
 * idpf_ctlq_init_regs - Initialize control queue registers
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 * @is_rxq: true if receive control queue, false otherwise
 *
 * Initialize registers. The caller is expected to have already initialized the
 * descriptor ring memory and buffer memory
 */
static void idpf_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
                                bool is_rxq)
{
        /* Update tail to post pre-allocated buffers for rx queues */
        if (is_rxq)
                wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1));

        /* For non-Mailbox control queues only TAIL needs to be set */
        if (cq->q_id != -1)
                return;

        /* Clear Head for both send or receive */
        wr32(hw, cq->reg.head, 0);

        /* set starting point */
        wr32(hw, cq->reg.bal, lower_32_bits(cq->desc_ring.pa));
        wr32(hw, cq->reg.bah, upper_32_bits(cq->desc_ring.pa));
        wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
}
/**
 * idpf_ctlq_init_rxq_bufs - populate receive queue descriptors with buffers
 * @cq: pointer to the specific Control queue
 *
 * Record the address of the receive queue DMA buffers in the descriptors.
 * The buffers must have been previously allocated.
 */
static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
{
        int i;

        for (i = 0; i < cq->ring_size; i++) {
                struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
                struct idpf_dma_mem *bi = cq->bi.rx_buff[i];

                /* No buffer to post to descriptor, continue */
                if (!bi)
                        continue;

                desc->flags =
                        cpu_to_le16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
                desc->opcode = 0;
                desc->datalen = cpu_to_le16(bi->size);
                desc->ret_val = 0;
                desc->v_opcode_dtype = 0;
                desc->v_retval = 0;
                desc->params.indirect.addr_high =
                        cpu_to_le32(upper_32_bits(bi->pa));
                desc->params.indirect.addr_low =
                        cpu_to_le32(lower_32_bits(bi->pa));
                desc->params.indirect.param0 = 0;
                desc->params.indirect.sw_cookie = 0;
                desc->params.indirect.v_flags = 0;
        }
}
/**
 * idpf_ctlq_shutdown - shutdown the CQ
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for any control queue
 */
static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
        mutex_lock(&cq->cq_lock);

        /* free ring buffers and the ring itself */
        idpf_ctlq_dealloc_ring_res(hw, cq);

        /* Set ring_size to 0 to indicate uninitialized queue */
        cq->ring_size = 0;

        mutex_unlock(&cq->cq_lock);
        mutex_destroy(&cq->cq_lock);
}
/**
 * idpf_ctlq_add - add one control queue
 * @hw: pointer to hardware struct
 * @qinfo: info for queue to be created
 * @cq_out: (output) double pointer to control queue to be created
 *
 * Allocate and initialize a control queue and add it to the control queue list.
 * The cq parameter will be allocated/initialized and passed back to the caller
 * if no errors occur.
 *
 * Note: idpf_ctlq_init must be called prior to any calls to idpf_ctlq_add
 */
int idpf_ctlq_add(struct idpf_hw *hw,
                  struct idpf_ctlq_create_info *qinfo,
                  struct idpf_ctlq_info **cq_out)
{
        struct idpf_ctlq_info *cq;
        bool is_rxq = false;
        int err;

        cq = kzalloc(sizeof(*cq), GFP_KERNEL);
        if (!cq)
                return -ENOMEM;

        cq->cq_type = qinfo->type;
        cq->q_id = qinfo->id;
        cq->buf_size = qinfo->buf_size;
        cq->ring_size = qinfo->len;

        cq->next_to_use = 0;
        cq->next_to_clean = 0;
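
        /* next_to_post tracks the last RX descriptor handed to hardware; it
         * starts at the end of the ring to match the tail value written for
         * RX queues in idpf_ctlq_init_regs().
         */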
        cq->next_to_post = cq->ring_size - 1;
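
        /* Only the mailbox control queue types are handled here; any other
         * queue type is rejected by the switch below.
         */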
        switch (qinfo->type) {
        case IDPF_CTLQ_TYPE_MAILBOX_RX:
                is_rxq = true;
                fallthrough;
        case IDPF_CTLQ_TYPE_MAILBOX_TX:
                err = idpf_ctlq_alloc_ring_res(hw, cq);
                break;
        default:
                err = -EBADR;
                break;
        }

        if (err)
                goto init_free_q;

        if (is_rxq) {
                idpf_ctlq_init_rxq_bufs(cq);
        } else {
                /* Allocate the array of msg pointers for TX queues */
                cq->bi.tx_msg = kcalloc(qinfo->len,
                                        sizeof(struct idpf_ctlq_msg *),
                                        GFP_KERNEL);
                if (!cq->bi.tx_msg) {
                        err = -ENOMEM;
                        goto init_dealloc_q_mem;
                }
        }

        idpf_ctlq_setup_regs(cq, qinfo);

        idpf_ctlq_init_regs(hw, cq, is_rxq);

        mutex_init(&cq->cq_lock);

        list_add(&cq->cq_list, &hw->cq_list_head);

        *cq_out = cq;

        return 0;

init_dealloc_q_mem:
        /* free ring buffers and the ring itself */
        idpf_ctlq_dealloc_ring_res(hw, cq);
init_free_q:
        kfree(cq);

        return err;
}
/**
 * idpf_ctlq_remove - deallocate and remove specified control queue
 * @hw: pointer to hardware struct
 * @cq: pointer to control queue to be removed
 */
void idpf_ctlq_remove(struct idpf_hw *hw,
                      struct idpf_ctlq_info *cq)
{
        list_del(&cq->cq_list);
        idpf_ctlq_shutdown(hw, cq);
        kfree(cq);
}
/**
 * idpf_ctlq_init - main initialization routine for all control queues
 * @hw: pointer to hardware struct
 * @num_q: number of queues to initialize
 * @q_info: array of structs containing info for each queue to be initialized
 *
 * This initializes any number and any type of control queues. This is an all
 * or nothing routine; if one fails, all previously allocated queues will be
 * destroyed. This must be called prior to using the individual add/remove
 * APIs.
 */
int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q,
                   struct idpf_ctlq_create_info *q_info)
{
        struct idpf_ctlq_info *cq, *tmp;
        int err;
        int i;

        INIT_LIST_HEAD(&hw->cq_list_head);

        for (i = 0; i < num_q; i++) {
                struct idpf_ctlq_create_info *qinfo = q_info + i;

                err = idpf_ctlq_add(hw, qinfo, &cq);
                if (err)
                        goto init_destroy_qs;
        }

        return 0;

init_destroy_qs:
        list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list)
                idpf_ctlq_remove(hw, cq);

        return err;
}
/**
 * idpf_ctlq_deinit - destroy all control queues
 * @hw: pointer to hw struct
 */
void idpf_ctlq_deinit(struct idpf_hw *hw)
{
        struct idpf_ctlq_info *cq, *tmp;

        list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list)
                idpf_ctlq_remove(hw, cq);
}
/**
 * idpf_ctlq_send - send command to Control Queue (CTQ)
 * @hw: pointer to hw struct
 * @cq: handle to control queue struct to send on
 * @num_q_msg: number of messages to send on control queue
 * @q_msg: pointer to array of queue messages to be sent
 *
 * The caller is expected to allocate DMAable buffers and pass them to the
 * send routine via the q_msg struct / control queue specific data struct.
 * The control queue will hold a reference to each send message until
 * the completion for that message has been cleaned.
 */
int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
                   u16 num_q_msg, struct idpf_ctlq_msg q_msg[])
{
        struct idpf_ctlq_desc *desc;
        int num_desc_avail;
        int err = 0;
        int i;

        mutex_lock(&cq->cq_lock);

        /* Ensure there are enough descriptors to send all messages */
        num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
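        /* Bail out early rather than posting a partial batch if the ring
         * cannot hold all num_q_msg descriptors.
         */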
        if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
                err = -ENOSPC;
                goto err_unlock;
        }

        for (i = 0; i < num_q_msg; i++) {
                struct idpf_ctlq_msg *msg = &q_msg[i];

                desc = IDPF_CTLQ_DESC(cq, cq->next_to_use);

                desc->opcode = cpu_to_le16(msg->opcode);
                desc->pfid_vfid = cpu_to_le16(msg->func_id);

                desc->v_opcode_dtype = cpu_to_le32(msg->cookie.mbx.chnl_opcode);
                desc->v_retval = cpu_to_le32(msg->cookie.mbx.chnl_retval);

                desc->flags = cpu_to_le16((msg->host_id & IDPF_HOST_ID_MASK) <<
                                          IDPF_CTLQ_FLAG_HOST_ID_S);
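
                /* Messages with a payload use the indirect descriptor format:
                 * BUF and RD flags plus the DMA address of the payload buffer.
                 * Zero-length messages carry their context directly in the
                 * descriptor instead.
                 */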
                if (msg->data_len) {
                        struct idpf_dma_mem *buff = msg->ctx.indirect.payload;

                        desc->datalen |= cpu_to_le16(msg->data_len);
                        desc->flags |= cpu_to_le16(IDPF_CTLQ_FLAG_BUF);
                        desc->flags |= cpu_to_le16(IDPF_CTLQ_FLAG_RD);

                        /* Update the address values in the desc with the pa
                         * value for respective buffer
                         */
                        desc->params.indirect.addr_high =
                                cpu_to_le32(upper_32_bits(buff->pa));
                        desc->params.indirect.addr_low =
                                cpu_to_le32(lower_32_bits(buff->pa));

                        memcpy(&desc->params, msg->ctx.indirect.context,
                               IDPF_INDIRECT_CTX_SIZE);
                } else {
                        memcpy(&desc->params, msg->ctx.direct,
                               IDPF_DIRECT_CTX_SIZE);
                }

                /* Store buffer info */
                cq->bi.tx_msg[cq->next_to_use] = msg;

                (cq->next_to_use)++;
                if (cq->next_to_use == cq->ring_size)
                        cq->next_to_use = 0;
        }

        /* Force memory write to complete before letting hardware
         * know that there are new descriptors to fetch.
         */
        dma_wmb();

        wr32(hw, cq->reg.tail, cq->next_to_use);

err_unlock:
        mutex_unlock(&cq->cq_lock);

        return err;
}
/**
 * idpf_ctlq_clean_sq - reclaim send descriptors on HW write back for the
 * requested queue
 * @cq: pointer to the specific Control queue
 * @clean_count: (input|output) number of descriptors to clean as input, and
 * number of descriptors actually cleaned as output
 * @msg_status: (output) pointer to msg pointer array to be populated; needs
 * to be allocated by caller
 *
 * Returns an array of message pointers associated with the cleaned
 * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
 * descriptors. The status will be returned for each; any messages that failed
 * to send will have a non-zero status. The caller is expected to free original
 * ctlq_msgs and free or reuse the DMA buffers.
 */
int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
                       struct idpf_ctlq_msg *msg_status[])
{
        struct idpf_ctlq_desc *desc;
        u16 i, num_to_clean;
        u16 ntc, desc_err;

        if (*clean_count == 0)
                return 0;
        if (*clean_count > cq->ring_size)
                return -EBADR;

        mutex_lock(&cq->cq_lock);

        ntc = cq->next_to_clean;

        num_to_clean = *clean_count;
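
        /* Clean descriptors in order, stopping at the first one the hardware
         * has not yet marked done (DD not set).
         */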
        for (i = 0; i < num_to_clean; i++) {
                /* Fetch next descriptor and check if marked as done */
                desc = IDPF_CTLQ_DESC(cq, ntc);
                if (!(le16_to_cpu(desc->flags) & IDPF_CTLQ_FLAG_DD))
                        break;

                /* strip off FW internal code */
                desc_err = le16_to_cpu(desc->ret_val) & 0xff;

                msg_status[i] = cq->bi.tx_msg[ntc];
                msg_status[i]->status = desc_err;

                cq->bi.tx_msg[ntc] = NULL;

                /* Zero out any stale data */
                memset(desc, 0, sizeof(*desc));

                ntc++;
                if (ntc == cq->ring_size)
                        ntc = 0;
        }

        cq->next_to_clean = ntc;

        mutex_unlock(&cq->cq_lock);

        /* Return number of descriptors actually cleaned */
        *clean_count = i;

        return 0;
}
/**
 * idpf_ctlq_post_rx_buffs - post buffers to descriptor ring
 * @hw: pointer to hw struct
 * @cq: pointer to control queue handle
 * @buff_count: (input|output) input is number of buffers caller is trying to
 * return; output is number of buffers that were not posted
 * @buffs: array of pointers to dma mem structs to be given to hardware
 *
 * Caller uses this function to return DMA buffers to the descriptor ring after
 * consuming them; buff_count will be the number of buffers.
 *
 * Note: this function needs to be called after a receive call even
 * if there are no DMA buffers to be returned, i.e. buff_count = 0,
 * buffs = NULL to support direct commands
 */
int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
                            u16 *buff_count, struct idpf_dma_mem **buffs)
{
        struct idpf_ctlq_desc *desc;
        u16 ntp = cq->next_to_post;
        bool buffs_avail = false;
        u16 tbp = ntp + 1;
        int i = 0;

        if (*buff_count > cq->ring_size)
                return -EBADR;

        if (*buff_count > 0)
                buffs_avail = true;

        mutex_lock(&cq->cq_lock);

        if (tbp >= cq->ring_size)
                tbp = 0;

        if (tbp == cq->next_to_clean)
                /* Nothing to do */
                goto post_buffs_out;

        /* Post buffers for as many as provided or up until the last one used */
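        /* If the caller runs out of buffers, the remaining free slots are
         * filled by moving buffers still parked further along the ring
         * (tracked by tbp) forward to the current entry.
         */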
        while (ntp != cq->next_to_clean) {
                desc = IDPF_CTLQ_DESC(cq, ntp);

                if (cq->bi.rx_buff[ntp])
                        goto fill_desc;
                if (!buffs_avail) {
                        /* If the caller hasn't given us any buffers or
                         * there are none left, search the ring itself
                         * for an available buffer to move to this
                         * entry starting at the next entry in the ring
                         */
                        tbp = ntp + 1;

                        /* Wrap ring if necessary */
                        if (tbp >= cq->ring_size)
                                tbp = 0;

                        while (tbp != cq->next_to_clean) {
                                if (cq->bi.rx_buff[tbp]) {
                                        cq->bi.rx_buff[ntp] =
                                                cq->bi.rx_buff[tbp];
                                        cq->bi.rx_buff[tbp] = NULL;

                                        /* Found a buffer, no need to
                                         * search anymore
                                         */
                                        break;
                                }

                                /* Wrap ring if necessary */
                                tbp++;
                                if (tbp >= cq->ring_size)
                                        tbp = 0;
                        }

                        if (tbp == cq->next_to_clean)
                                goto post_buffs_out;
                } else {
                        /* Give back pointer to DMA buffer */
                        cq->bi.rx_buff[ntp] = buffs[i];
                        i++;

                        if (i >= *buff_count)
                                buffs_avail = false;
                }

fill_desc:
                desc->flags =
                        cpu_to_le16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);

                /* Post buffers to descriptor */
                desc->datalen = cpu_to_le16(cq->bi.rx_buff[ntp]->size);
                desc->params.indirect.addr_high =
                        cpu_to_le32(upper_32_bits(cq->bi.rx_buff[ntp]->pa));
                desc->params.indirect.addr_low =
                        cpu_to_le32(lower_32_bits(cq->bi.rx_buff[ntp]->pa));

                ntp++;
                if (ntp == cq->ring_size)
                        ntp = 0;
        }

post_buffs_out:
        /* Only update tail if buffers were actually posted */
        if (cq->next_to_post != ntp) {
                if (ntp)
                        /* Update next_to_post to ntp - 1 since current ntp
                         * will not have a buffer
                         */
                        cq->next_to_post = ntp - 1;
                else
                        /* Wrap to end of ring since current ntp is 0 */
                        cq->next_to_post = cq->ring_size - 1;

                dma_wmb();

                wr32(hw, cq->reg.tail, cq->next_to_post);
        }

        mutex_unlock(&cq->cq_lock);

        /* return the number of buffers that were not posted */
        *buff_count = *buff_count - i;

        return 0;
}
/**
 * idpf_ctlq_recv - receive control queue message call back
 * @cq: pointer to control queue handle to receive on
 * @num_q_msg: (input|output) input number of messages that should be received;
 * output number of messages actually received
 * @q_msg: (output) array of received control queue messages on this q;
 * needs to be pre-allocated by caller for as many messages as requested
 *
 * Called by interrupt handler or polling mechanism. Caller is expected
 * to free buffers
 */
int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
                   struct idpf_ctlq_msg *q_msg)
{
        u16 num_to_clean, ntc, flags;
        struct idpf_ctlq_desc *desc;
        int err = 0;
        u16 i;

        /* take the lock before we start messing with the ring */
        mutex_lock(&cq->cq_lock);

        ntc = cq->next_to_clean;

        num_to_clean = *num_q_msg;

        for (i = 0; i < num_to_clean; i++) {
                /* Fetch next descriptor and check if marked as done */
                desc = IDPF_CTLQ_DESC(cq, ntc);
                flags = le16_to_cpu(desc->flags);

                if (!(flags & IDPF_CTLQ_FLAG_DD))
                        break;
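
                /* The FTYPE flag bits identify the type of function (PF or
                 * VM/VF) associated with the message.
                 */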
                q_msg[i].vmvf_type = (flags &
                                      (IDPF_CTLQ_FLAG_FTYPE_VM |
                                       IDPF_CTLQ_FLAG_FTYPE_PF)) >>
                                      IDPF_CTLQ_FLAG_FTYPE_S;

                if (flags & IDPF_CTLQ_FLAG_ERR)
                        err = -EBADMSG;

                q_msg[i].cookie.mbx.chnl_opcode =
                        le32_to_cpu(desc->v_opcode_dtype);
                q_msg[i].cookie.mbx.chnl_retval =
                        le32_to_cpu(desc->v_retval);

                q_msg[i].opcode = le16_to_cpu(desc->opcode);
                q_msg[i].data_len = le16_to_cpu(desc->datalen);
                q_msg[i].status = le16_to_cpu(desc->ret_val);
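
                /* Indirect (buffered) messages hand the posted RX buffer up
                 * to the caller; direct messages copy their context straight
                 * out of the descriptor.
                 */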
                if (desc->datalen) {
                        memcpy(q_msg[i].ctx.indirect.context,
                               &desc->params.indirect, IDPF_INDIRECT_CTX_SIZE);

                        /* Assign pointer to DMA buffer to ctlq_msg array
                         * to be given to upper layer
                         */
                        q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];

                        /* Zero out pointer to DMA buffer info;
                         * will be repopulated by post buffers API
                         */
                        cq->bi.rx_buff[ntc] = NULL;
                } else {
                        memcpy(q_msg[i].ctx.direct, desc->params.raw,
                               IDPF_DIRECT_CTX_SIZE);
                }

                /* Zero out stale data in descriptor */
                memset(desc, 0, sizeof(struct idpf_ctlq_desc));

                ntc++;
                if (ntc == cq->ring_size)
                        ntc = 0;
        }

        cq->next_to_clean = ntc;

        mutex_unlock(&cq->cq_lock);

        *num_q_msg = i;
        if (*num_q_msg == 0)
                err = -ENOMSG;

        return err;
}