// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */

#include "idpf_controlq.h"
/**
 * idpf_ctlq_alloc_desc_ring - Allocate Control Queue (CQ) rings
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 */
static int idpf_ctlq_alloc_desc_ring(struct idpf_hw *hw,
				     struct idpf_ctlq_info *cq)
{
	size_t size = cq->ring_size * sizeof(struct idpf_ctlq_desc);

	cq->desc_ring.va = idpf_alloc_dma_mem(hw, &cq->desc_ring, size);
	if (!cq->desc_ring.va)
		return -ENOMEM;

	return 0;
}
/**
 * idpf_ctlq_alloc_bufs - Allocate Control Queue (CQ) buffers
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * Allocate the buffer head for all control queues, and if it's a receive
 * queue, allocate DMA buffers
 */
static int idpf_ctlq_alloc_bufs(struct idpf_hw *hw,
				struct idpf_ctlq_info *cq)
{
	int i;
	/* Do not allocate DMA buffers for transmit queues */
	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)
		return 0;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->bi.rx_buff = kcalloc(cq->ring_size, sizeof(struct idpf_dma_mem *),
				 GFP_KERNEL);
	if (!cq->bi.rx_buff)
		return -ENOMEM;
	/* allocate the mapped buffers (except for the last one) */
	for (i = 0; i < cq->ring_size - 1; i++) {
		struct idpf_dma_mem *bi;
		int num = 1; /* number of idpf_dma_mem to be allocated */

		cq->bi.rx_buff[i] = kcalloc(num, sizeof(struct idpf_dma_mem),
					    GFP_KERNEL);
		if (!cq->bi.rx_buff[i])
			goto unwind_alloc_cq_bufs;

		bi = cq->bi.rx_buff[i];

		bi->va = idpf_alloc_dma_mem(hw, bi, cq->buf_size);
		if (!bi->va) {
			/* unwind will not free the failed entry */
			kfree(cq->bi.rx_buff[i]);
			goto unwind_alloc_cq_bufs;
		}
	}

	return 0;

unwind_alloc_cq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		idpf_free_dma_mem(hw, cq->bi.rx_buff[i]);
		kfree(cq->bi.rx_buff[i]);
	}
	kfree(cq->bi.rx_buff);

	return -ENOMEM;
}
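
/* Note: the allocation loop above deliberately leaves the last ring entry
 * without a buffer (only ring_size - 1 buffers are set up), presumably so
 * that a completely full ring can be distinguished from an empty one.  The
 * unwind label only walks entries that were fully allocated; a failed
 * iteration frees its own partial allocation before jumping there.
 */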
/**
 * idpf_ctlq_free_desc_ring - Free Control Queue (CQ) rings
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 */
static void idpf_ctlq_free_desc_ring(struct idpf_hw *hw,
				     struct idpf_ctlq_info *cq)
{
	idpf_free_dma_mem(hw, &cq->desc_ring);
}
/**
 * idpf_ctlq_free_bufs - Free CQ buffer info elements
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * Free the DMA buffers for RX queues, and DMA buffer header for both RX and TX
 * queues. The upper layers are expected to manage freeing of TX DMA buffers
 */
static void idpf_ctlq_free_bufs(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	void *bi;

	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX) {
		int i;

		/* free DMA buffers for rx queues */
		for (i = 0; i < cq->ring_size; i++) {
			if (cq->bi.rx_buff[i]) {
				idpf_free_dma_mem(hw, cq->bi.rx_buff[i]);
				kfree(cq->bi.rx_buff[i]);
			}
		}

		bi = (void *)cq->bi.rx_buff;
	} else {
		bi = (void *)cq->bi.tx_msg;
	}

	/* free the buffer header */
	kfree(bi);
}
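
/* For TX queues, cq->bi.tx_msg is only the bookkeeping array of message
 * pointers; the DMA buffers those messages reference are owned and freed by
 * the callers that posted them, so only the array itself is released here.
 */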
/**
 * idpf_ctlq_dealloc_ring_res - Free memory allocated for control queue
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * Free the memory used by the ring, buffers and other related structures
 */
void idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	/* free ring buffers and the ring itself */
	idpf_ctlq_free_bufs(hw, cq);
	idpf_ctlq_free_desc_ring(hw, cq);
}
/**
 * idpf_ctlq_alloc_ring_res - allocate memory for descriptor ring and bufs
 * @hw: pointer to hw struct
 * @cq: pointer to control queue struct
 *
 * Do *NOT* hold cq_lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	int err;

	/* allocate the ring memory */
	err = idpf_ctlq_alloc_desc_ring(hw, cq);
	if (err)
		return err;

	/* allocate buffers in the rings */
	err = idpf_ctlq_alloc_bufs(hw, cq);
	if (err)
		goto idpf_init_cq_free_ring;

	return 0;

idpf_init_cq_free_ring:
	idpf_free_dma_mem(hw, &cq->desc_ring);

	return err;
}
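
/* Illustrative usage sketch (not part of the driver): how a caller might
 * pair idpf_ctlq_alloc_ring_res() with idpf_ctlq_dealloc_ring_res().  The
 * helper name and the ring/buffer sizes below are assumptions for the
 * example only; in the driver proper these fields are populated when the
 * control queue is created.
 *
 *	static int example_setup_mbx_rxq(struct idpf_hw *hw,
 *					 struct idpf_ctlq_info *cq)
 *	{
 *		int err;
 *
 *		cq->cq_type = IDPF_CTLQ_TYPE_MAILBOX_RX;
 *		cq->ring_size = 64;	// example value
 *		cq->buf_size = 4096;	// example value
 *
 *		// Must not be called with cq_lock held; the allocations
 *		// below can sleep.
 *		err = idpf_ctlq_alloc_ring_res(hw, cq);
 *		if (err)
 *			return err;
 *
 *		// ... post and receive mailbox messages ...
 *
 *		// TX send buffers, if any were posted, are freed by the
 *		// caller; this releases only what was allocated above.
 *		idpf_ctlq_dealloc_ring_res(hw, cq);
 *
 *		return 0;
 *	}
 */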