// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */

#include "enetc.h"
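
/* The control BD ring (CBDR) carries configuration commands to the hardware:
 * the driver places a command descriptor at the producer index (PIR), the
 * hardware consumes it, advances the consumer index (CIR) and may write
 * status/result data back into the descriptor.
 */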
int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
		     struct enetc_cbdr *cbdr)
{
	int size = bd_count * sizeof(struct enetc_cbd);

	cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base,
					   GFP_KERNEL);
	if (!cbdr->bd_base)
		return -ENOMEM;

	/* h/w requires 128B alignment */
	if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) {
		dma_free_coherent(dev, size, cbdr->bd_base,
				  cbdr->bd_dma_base);
		return -EINVAL;
	}

	cbdr->next_to_clean = 0;
	cbdr->next_to_use = 0;
	cbdr->dma_dev = dev;
	cbdr->bd_count = bd_count;

	cbdr->pir = hw->reg + ENETC_SICBDRPIR;
	cbdr->cir = hw->reg + ENETC_SICBDRCIR;
	cbdr->mr = hw->reg + ENETC_SICBDRMR;

	/* set CBDR cache attributes */
	enetc_wr(hw, ENETC_SICAR2,
		 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);

	enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base));
	enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base));
	enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count));

	enetc_wr_reg(cbdr->pir, cbdr->next_to_clean);
	enetc_wr_reg(cbdr->cir, cbdr->next_to_use);
	/* enable ring */
	enetc_wr_reg(cbdr->mr, BIT(31));

	return 0;
}
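
/* Disable the ring and release the coherent DMA memory backing it */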
void enetc_teardown_cbdr(struct enetc_cbdr *cbdr)
{
	int size = cbdr->bd_count * sizeof(struct enetc_cbd);

	/* disable ring */
	enetc_wr_reg(cbdr->mr, 0);

	dma_free_coherent(cbdr->dma_dev, size, cbdr->bd_base,
			  cbdr->bd_dma_base);
	cbdr->bd_base = NULL;
	cbdr->dma_dev = NULL;
}
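
/* Reclaim descriptors already consumed by hardware (CIR has moved past
 * them), warning about any that completed with a non-zero status.
 */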
static void enetc_clean_cbdr(struct enetc_cbdr *ring)
{
	struct enetc_cbd *dest_cbd;
	int i, status;

	i = ring->next_to_clean;

	while (enetc_rd_reg(ring->cir) != i) {
		dest_cbd = ENETC_CBD(*ring, i);
		status = dest_cbd->status_flags & ENETC_CBD_STATUS_MASK;
		if (status)
			dev_warn(ring->dma_dev, "CMD err %04x for cmd %04x\n",
				 status, dest_cbd->cmd);

		memset(dest_cbd, 0, sizeof(*dest_cbd));

		i = (i + 1) % ring->bd_count;
	}

	ring->next_to_clean = i;
}
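
/* Number of free slots left in the ring (one slot stays unused so a full
 * ring can be distinguished from an empty one).
 */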
static int enetc_cbd_unused(struct enetc_cbdr *r)
{
	return (r->next_to_clean - r->next_to_use - 1 + r->bd_count) %
		r->bd_count;
}
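
/* Post one command descriptor and busy-wait (up to ENETC_CBDR_TIMEOUT
 * microseconds) for hardware to consume it; on success the descriptor,
 * including any data written back by hardware, is copied back to the caller.
 */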
int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
{
	struct enetc_cbdr *ring = &si->cbd_ring;
	int timeout = ENETC_CBDR_TIMEOUT;
	struct enetc_cbd *dest_cbd;
	int i;

	if (unlikely(!ring->bd_base))
		return -EIO;

	if (unlikely(!enetc_cbd_unused(ring)))
		enetc_clean_cbdr(ring);

	i = ring->next_to_use;
	dest_cbd = ENETC_CBD(*ring, i);

	/* copy command to the ring */
	*dest_cbd = *cbd;
	i = (i + 1) % ring->bd_count;

	ring->next_to_use = i;
	/* let H/W know BD ring has been updated */
	enetc_wr_reg(ring->pir, i);

	do {
		if (enetc_rd_reg(ring->cir) == i)
			break;
		udelay(10); /* cannot sleep, rtnl_lock() */
		timeout -= 10;
	} while (timeout);

	if (!timeout)
		return -EBUSY;

	/* CBD may writeback data, feedback up level */
	*cbd = *dest_cbd;

	enetc_clean_cbdr(ring);

	return 0;
}
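
/* Clear (invalidate) the MAC filter table entry at @index */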
int enetc_clear_mac_flt_entry(struct enetc_si *si, int index)
{
	struct enetc_cbd cbd;

	memset(&cbd, 0, sizeof(cbd));

	cbd.cls = 1;
	cbd.status_flags = ENETC_CBD_FLAGS_SF;
	cbd.index = cpu_to_le16(index);

	return enetc_send_cmd(si, &cbd);
}
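
/* Program a MAC filter table entry: the MAC address is split across
 * addr[0]/addr[1] and @si_map selects the station interfaces the entry
 * applies to.
 */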
int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
			    char *mac_addr, int si_map)
{
	struct enetc_cbd cbd;
	u32 upper;
	u16 lower;

	memset(&cbd, 0, sizeof(cbd));

	/* fill up the "set" descriptor */
	cbd.cls = 1;
	cbd.status_flags = ENETC_CBD_FLAGS_SF;
	cbd.index = cpu_to_le16(index);
	cbd.opt[3] = cpu_to_le32(si_map);
	/* enable entry */
	cbd.opt[0] = cpu_to_le32(BIT(31));

	upper = *(const u32 *)mac_addr;
	lower = *(const u16 *)(mac_addr + 4);
	cbd.addr[0] = cpu_to_le32(upper);
	cbd.addr[1] = cpu_to_le32(lower);

	return enetc_send_cmd(si, &cbd);
}
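
/* Larger command payloads (RFS entries, the RSS table) are passed through a
 * separate DMA buffer referenced by cbd.addr[]; the buffer is over-allocated
 * by the alignment amount so an aligned address can be handed to hardware.
 */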
#define RFSE_ALIGN	64
/* Set entry in RFS table */
int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
		       int index)
{
	struct enetc_cbdr *ring = &si->cbd_ring;
	struct enetc_cbd cbd = {.cmd = 0};
	dma_addr_t dma, dma_align;
	void *tmp, *tmp_align;
	int err;

	/* fill up the "set" descriptor */
	cbd.cmd = 0;
	cbd.cls = 4;
	cbd.index = cpu_to_le16(index);
	cbd.length = cpu_to_le16(sizeof(*rfse));
	cbd.opt[3] = cpu_to_le32(0); /* SI */

	tmp = dma_alloc_coherent(ring->dma_dev, sizeof(*rfse) + RFSE_ALIGN,
				 &dma, GFP_KERNEL);
	if (!tmp) {
		dev_err(ring->dma_dev, "DMA mapping of RFS entry failed!\n");
		return -ENOMEM;
	}

	dma_align = ALIGN(dma, RFSE_ALIGN);
	tmp_align = PTR_ALIGN(tmp, RFSE_ALIGN);
	memcpy(tmp_align, rfse, sizeof(*rfse));

	cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
	cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));

	err = enetc_send_cmd(si, &cbd);
	if (err)
		dev_err(ring->dma_dev, "FS entry add failed (%d)!", err);

	dma_free_coherent(ring->dma_dev, sizeof(*rfse) + RFSE_ALIGN,
			  tmp, dma);

	return err;
}
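
/* Read or write the RSS indirection table in a single command; hardware
 * transfers one byte per entry and only accepts a full table.
 */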
#define RSSE_ALIGN	64
static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
			       bool read)
{
	struct enetc_cbdr *ring = &si->cbd_ring;
	struct enetc_cbd cbd = {.cmd = 0};
	dma_addr_t dma, dma_align;
	u8 *tmp, *tmp_align;
	int err, i;

	if (count < RSSE_ALIGN)
		/* HW only takes in a full 64 entry table */
		return -EINVAL;

	tmp = dma_alloc_coherent(ring->dma_dev, count + RSSE_ALIGN,
				 &dma, GFP_KERNEL);
	if (!tmp) {
		dev_err(ring->dma_dev, "DMA mapping of RSS table failed!\n");
		return -ENOMEM;
	}
	dma_align = ALIGN(dma, RSSE_ALIGN);
	tmp_align = PTR_ALIGN(tmp, RSSE_ALIGN);

	if (!read)
		for (i = 0; i < count; i++)
			tmp_align[i] = (u8)(table[i]);

	/* fill up the descriptor */
	cbd.cmd = read ? 2 : 1;
	cbd.cls = 3;
	cbd.length = cpu_to_le16(count);

	cbd.addr[0] = cpu_to_le32(lower_32_bits(dma_align));
	cbd.addr[1] = cpu_to_le32(upper_32_bits(dma_align));

	err = enetc_send_cmd(si, &cbd);
	if (err)
		dev_err(ring->dma_dev, "RSS cmd failed (%d)!", err);

	if (read)
		for (i = 0; i < count; i++)
			table[i] = tmp_align[i];

	dma_free_coherent(ring->dma_dev, count + RSSE_ALIGN, tmp, dma);

	return err;
}
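
/* Exported accessors for the RSS indirection table; the "set" variant casts
 * away const since the helper only reads from @table when writing it out.
 */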
int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count)
{
	return enetc_cmd_rss_table(si, table, count, true);
}

int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count)
{
	return enetc_cmd_rss_table(si, (u32 *)table, count, false);
}