// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "i40e.h"
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"

/**
 * i40e_alloc_xsk_umems - Allocate an array to store per ring UMEMs
 * @vsi: Current VSI
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_alloc_xsk_umems(struct i40e_vsi *vsi)
{
	if (vsi->xsk_umems)
		return 0;

	vsi->num_xsk_umems_used = 0;
	vsi->num_xsk_umems = vsi->alloc_queue_pairs;
	vsi->xsk_umems = kcalloc(vsi->num_xsk_umems, sizeof(*vsi->xsk_umems),
				 GFP_KERNEL);
	if (!vsi->xsk_umems) {
		vsi->num_xsk_umems = 0;
		return -ENOMEM;
	}

	return 0;
}

/**
 * i40e_add_xsk_umem - Store a UMEM for a certain ring/qid
 * @vsi: Current VSI
 * @umem: UMEM to store
 * @qid: Ring/qid to associate with the UMEM
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_add_xsk_umem(struct i40e_vsi *vsi, struct xdp_umem *umem,
			     u16 qid)
{
	int err;

	err = i40e_alloc_xsk_umems(vsi);
	if (err)
		return err;

	vsi->xsk_umems[qid] = umem;
	vsi->num_xsk_umems_used++;

	return 0;
}

/**
 * i40e_remove_xsk_umem - Remove a UMEM for a certain ring/qid
 * @vsi: Current VSI
 * @qid: Ring/qid associated with the UMEM
 **/
static void i40e_remove_xsk_umem(struct i40e_vsi *vsi, u16 qid)
{
	vsi->xsk_umems[qid] = NULL;
	vsi->num_xsk_umems_used--;

	if (vsi->num_xsk_umems == 0) {
		kfree(vsi->xsk_umems);
		vsi->xsk_umems = NULL;
		vsi->num_xsk_umems = 0;
	}
}

/**
 * i40e_xsk_umem_dma_map - DMA maps all UMEM memory for the netdev
 * @vsi: Current VSI
 * @umem: UMEM to DMA map
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_umem_dma_map(struct i40e_vsi *vsi, struct xdp_umem *umem)
{
	struct i40e_pf *pf = vsi->back;
	struct device *dev;
	unsigned int i, j;
	dma_addr_t dma;

	dev = &pf->pdev->dev;
	for (i = 0; i < umem->npgs; i++) {
		dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
		if (dma_mapping_error(dev, dma))
			goto out_unmap;

		umem->pages[i].dma = dma;
	}

	return 0;

out_unmap:
	/* Unwind the pages that were successfully mapped before the failure. */
	for (j = 0; j < i; j++) {
		dma_unmap_page_attrs(dev, umem->pages[j].dma, PAGE_SIZE,
				     DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
		umem->pages[j].dma = 0;
	}

	return -1;
}

/**
 * i40e_xsk_umem_dma_unmap - DMA unmaps all UMEM memory for the netdev
 * @vsi: Current VSI
 * @umem: UMEM to DMA unmap
 **/
static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem)
{
	struct i40e_pf *pf = vsi->back;
	struct device *dev;
	unsigned int i;

	dev = &pf->pdev->dev;

	for (i = 0; i < umem->npgs; i++) {
		dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
				     DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);

		umem->pages[i].dma = 0;
	}
}

/**
 * i40e_xsk_umem_enable - Enable/associate a UMEM to a certain ring/qid
 * @vsi: Current VSI
 * @umem: UMEM
 * @qid: Rx ring to associate UMEM to
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
				u16 qid)
{
	struct xdp_umem_fq_reuse *reuseq;
	bool if_running;
	int err;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	if (qid >= vsi->num_queue_pairs)
		return -EINVAL;

	if (vsi->xsk_umems) {
		if (qid >= vsi->num_xsk_umems)
			return -EINVAL;
		if (vsi->xsk_umems[qid])
			return -EBUSY;
	}

	reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count);
	if (!reuseq)
		return -ENOMEM;

	xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));

	err = i40e_xsk_umem_dma_map(vsi, umem);
	if (err)
		return err;

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;
	}

	err = i40e_add_xsk_umem(vsi, umem, qid);
	if (err)
		return err;

	if (if_running) {
		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;
	}

	return 0;
}

/**
 * i40e_xsk_umem_disable - Disassociate a UMEM from a certain ring/qid
 * @vsi: Current VSI
 * @qid: Rx ring to disassociate the UMEM from
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
{
	bool if_running;
	int err;

	if (!vsi->xsk_umems || qid >= vsi->num_xsk_umems ||
	    !vsi->xsk_umems[qid])
		return -EINVAL;

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;
	}

	i40e_xsk_umem_dma_unmap(vsi, vsi->xsk_umems[qid]);
	i40e_remove_xsk_umem(vsi, qid);

	if (if_running) {
		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;
	}

	return 0;
}

/**
 * i40e_xsk_umem_query - Queries a certain ring/qid for its UMEM
 * @vsi: Current VSI
 * @umem: UMEM associated to the ring, if any
 * @qid: Rx ring to query for a UMEM
 *
 * This function stores the UMEM associated with a certain ring, if any.
 *
 * Returns 0 on success, <0 on failure
 **/
int i40e_xsk_umem_query(struct i40e_vsi *vsi, struct xdp_umem **umem,
			u16 qid)
{
	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	if (qid >= vsi->num_queue_pairs)
		return -EINVAL;

	if (vsi->xsk_umems) {
		if (qid >= vsi->num_xsk_umems)
			return -EINVAL;
		*umem = vsi->xsk_umems[qid];
		return 0;
	}

	*umem = NULL;
	return 0;
}

/**
 * i40e_xsk_umem_setup - Enable/disassociate a UMEM to/from a ring/qid
 * @vsi: Current VSI
 * @umem: UMEM to enable/associate to a ring, or NULL to disable
 * @qid: Rx ring to (dis)associate UMEM to/from
 *
 * This function enables or disables a UMEM for a certain ring/qid.
 *
 * Returns 0 on success, <0 on failure
 **/
int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
			u16 qid)
{
	return umem ? i40e_xsk_umem_enable(vsi, umem, qid) :
		i40e_xsk_umem_disable(vsi, qid);
}
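
/* Note: i40e_xsk_umem_setup() is expected to be reached from the driver's
 * ndo_bpf handler when user space issues the XDP_SETUP_XSK_UMEM command
 * for an AF_XDP socket (umem == NULL requests teardown).
 */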

/**
 * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 *
 * This function runs the XDP program on the received frame and returns
 * the verdict.
 *
 * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
 **/
static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
{
	int err, result = I40E_XDP_PASS;
	struct i40e_ring *xdp_ring;
	struct bpf_prog *xdp_prog;
	u32 act;

	rcu_read_lock();
	/* NB! xdp_prog will always be !NULL, due to the fact that
	 * this path is enabled by setting an XDP program.
	 */
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);
	xdp->handle += xdp->data - xdp->data_hard_start;
	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		/* fallthrough -- handle aborts by dropping packet */
	case XDP_DROP:
		result = I40E_XDP_CONSUMED;
		break;
	}
	rcu_read_unlock();
	return result;
}

/**
 * i40e_alloc_buffer_zc - Allocates an i40e_rx_buffer
 * @rx_ring: Rx ring
 * @bi: Rx buffer to populate
 *
 * This function allocates an Rx buffer. The buffer can come from fill
 * queue, or via the recycle queue (next_to_alloc).
 *
 * Returns true for a successful allocation, false otherwise
 **/
static bool i40e_alloc_buffer_zc(struct i40e_ring *rx_ring,
				 struct i40e_rx_buffer *bi)
{
	struct xdp_umem *umem = rx_ring->xsk_umem;
	void *addr = bi->addr;
	u64 handle, hr;

	if (addr) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	if (!xsk_umem_peek_addr(umem, &handle)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	hr = umem->headroom + XDP_PACKET_HEADROOM;

	bi->dma = xdp_umem_get_dma(umem, handle);
	bi->dma += hr;

	bi->addr = xdp_umem_get_data(umem, handle);
	bi->addr += hr;

	bi->handle = handle + umem->headroom;

	xsk_umem_discard_addr(umem);
	return true;
}

/**
 * i40e_alloc_buffer_slow_zc - Allocates an i40e_rx_buffer
 * @rx_ring: Rx ring
 * @bi: Rx buffer to populate
 *
 * This function allocates an Rx buffer. The buffer can come from fill
 * queue, or via the reuse queue.
 *
 * Returns true for a successful allocation, false otherwise
 **/
static bool i40e_alloc_buffer_slow_zc(struct i40e_ring *rx_ring,
				      struct i40e_rx_buffer *bi)
{
	struct xdp_umem *umem = rx_ring->xsk_umem;
	u64 handle, hr;

	if (!xsk_umem_peek_addr_rq(umem, &handle)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	handle &= rx_ring->xsk_umem->chunk_mask;

	hr = umem->headroom + XDP_PACKET_HEADROOM;

	bi->dma = xdp_umem_get_dma(umem, handle);
	bi->dma += hr;

	bi->addr = xdp_umem_get_data(umem, handle);
	bi->addr += hr;

	bi->handle = handle + umem->headroom;

	xsk_umem_discard_addr_rq(umem);
	return true;
}

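/**
 * __i40e_alloc_rx_buffers_zc - Allocates a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 * @alloc: Allocation callback used to populate each Rx buffer
 *
 * This helper fills @count Rx descriptors using the supplied @alloc
 * callback (the fast or the slow/reuse-queue variant) and releases the
 * updated tail to hardware if any descriptors were written.
 *
 * Returns true for a successful allocation, false otherwise
 **/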
static __always_inline bool
__i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count,
			   bool alloc(struct i40e_ring *rx_ring,
				      struct i40e_rx_buffer *bi))
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;
	bool ok = true;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_bi[ntu];
	do {
		if (!alloc(rx_ring, bi)) {
			ok = false;
			goto no_buffers;
		}

		dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 0,
						 rx_ring->rx_buf_len,
						 DMA_BIDIRECTIONAL);

		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

		rx_desc++;
		bi++;
		ntu++;

		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = I40E_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_bi;
			ntu = 0;
		}

		rx_desc->wb.qword1.status_error_len = 0;
		count--;
	} while (count);

no_buffers:
	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	return ok;
}

/**
 * i40e_alloc_rx_buffers_zc - Allocates a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * This function allocates a number of Rx buffers from the reuse queue
 * or fill ring and places them on the Rx ring.
 *
 * Returns true for a successful allocation, false otherwise
 **/
bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
{
	return __i40e_alloc_rx_buffers_zc(rx_ring, count,
					  i40e_alloc_buffer_slow_zc);
}

/**
 * i40e_alloc_rx_buffers_fast_zc - Allocates a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * This function allocates a number of Rx buffers from the fill ring
 * or the internal recycle mechanism and places them on the Rx ring.
 *
 * Returns true for a successful allocation, false otherwise
 **/
static bool i40e_alloc_rx_buffers_fast_zc(struct i40e_ring *rx_ring, u16 count)
{
	return __i40e_alloc_rx_buffers_zc(rx_ring, count,
					  i40e_alloc_buffer_zc);
}

/**
 * i40e_get_rx_buffer_zc - Return the current Rx buffer
 * @rx_ring: Rx ring
 * @size: The size of the rx buffer (read from descriptor)
 *
 * This function returns the current, received Rx buffer, and also
 * does DMA synchronization of the buffer for CPU use.
 *
 * Returns the received Rx buffer
 **/
static struct i40e_rx_buffer *i40e_get_rx_buffer_zc(struct i40e_ring *rx_ring,
						    const unsigned int size)
{
	struct i40e_rx_buffer *bi;

	bi = &rx_ring->rx_bi[rx_ring->next_to_clean];

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      bi->dma, 0,
				      size,
				      DMA_BIDIRECTIONAL);

	return bi;
}

/**
 * i40e_reuse_rx_buffer_zc - Recycle an Rx buffer
 * @rx_ring: Rx ring
 * @old_bi: The Rx buffer to recycle
 *
 * This function recycles a finished Rx buffer, and places it on the
 * recycle queue (next_to_alloc).
 **/
static void i40e_reuse_rx_buffer_zc(struct i40e_ring *rx_ring,
				    struct i40e_rx_buffer *old_bi)
{
	struct i40e_rx_buffer *new_bi = &rx_ring->rx_bi[rx_ring->next_to_alloc];
	unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask;
	u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
	u16 nta = rx_ring->next_to_alloc;

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* transfer page from old buffer to new buffer */
	new_bi->dma = old_bi->dma & mask;
	new_bi->dma += hr;

	new_bi->addr = (void *)((unsigned long)old_bi->addr & mask);
	new_bi->addr += hr;

	new_bi->handle = old_bi->handle & mask;
	new_bi->handle += rx_ring->xsk_umem->headroom;

	old_bi->addr = NULL;
}

/**
 * i40e_zca_free - Free callback for MEM_TYPE_ZERO_COPY allocations
 * @alloc: Zero-copy allocator
 * @handle: Buffer handle
 **/
void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
{
	struct i40e_rx_buffer *bi;
	struct i40e_ring *rx_ring;
	u64 hr, mask;
	u16 nta;

	rx_ring = container_of(alloc, struct i40e_ring, zca);
	hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
	mask = rx_ring->xsk_umem->chunk_mask;

	nta = rx_ring->next_to_alloc;
	bi = &rx_ring->rx_bi[nta];

	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	handle &= mask;

	bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle);
	bi->dma += hr;

	bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle);
	bi->addr += hr;

	bi->handle = (u64)handle + rx_ring->xsk_umem->headroom;
}

/**
 * i40e_construct_skb_zc - Create an skbuff from a zero-copy Rx buffer
 * @rx_ring: Rx ring
 * @bi: Rx buffer
 * @xdp: xdp_buff
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb, or NULL on failure.
 **/
static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
					     struct i40e_rx_buffer *bi,
					     struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int datasize = xdp->data_end - xdp->data;
	struct sk_buff *skb;

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       xdp->data_end - xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

	i40e_reuse_rx_buffer_zc(rx_ring, bi);
	return skb;
}

/**
 * i40e_inc_ntc - Advance the next_to_clean index
 * @rx_ring: Rx ring
 **/
static void i40e_inc_ntc(struct i40e_ring *rx_ring)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
	prefetch(I40E_RX_DESC(rx_ring, ntc));
}

/**
 * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
 * @rx_ring: Rx ring
 * @budget: NAPI budget
 *
 * Returns amount of work completed
 **/
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;
	struct sk_buff *skb;
	struct xdp_buff xdp;

	xdp.rxq = &rx_ring->xdp_rxq;

	while (likely(total_rx_packets < (unsigned int)budget)) {
		struct i40e_rx_buffer *bi;
		union i40e_rx_desc *rx_desc;
		unsigned int size;
		u16 vlan_tag;
		u8 rx_ptype;
		u64 qword;

		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
			failure = failure ||
				  !i40e_alloc_rx_buffers_fast_zc(rx_ring,
								 cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();

		bi = i40e_clean_programming_status(rx_ring, rx_desc,
						   qword);
		if (unlikely(bi)) {
			i40e_reuse_rx_buffer_zc(rx_ring, bi);
			cleaned_count++;
			continue;
		}

		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		if (!size)
			break;

		bi = i40e_get_rx_buffer_zc(rx_ring, size);
		xdp.data = bi->addr;
		xdp.data_meta = xdp.data;
		xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
		xdp.data_end = xdp.data + size;
		xdp.handle = bi->handle;

		xdp_res = i40e_run_xdp_zc(rx_ring, &xdp);
		if (xdp_res) {
			if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
				xdp_xmit |= xdp_res;
				bi->addr = NULL;
			} else {
				i40e_reuse_rx_buffer_zc(rx_ring, bi);
			}

			total_rx_bytes += size;
			total_rx_packets++;

			cleaned_count++;
			i40e_inc_ntc(rx_ring);
			continue;
		}

		/* XDP_PASS path */

		/* NB! We are not checking for errors using
		 * i40e_test_staterr with
		 * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is due to that
		 * SBP is *not* set in PRT_SBPVSI (default not set).
		 */
		skb = i40e_construct_skb_zc(rx_ring, bi, &xdp);
		if (!skb) {
			rx_ring->rx_stats.alloc_buff_failed++;
			break;
		}

		cleaned_count++;
		i40e_inc_ntc(rx_ring);

		if (eth_skb_pad(skb))
			continue;

		total_rx_bytes += skb->len;
		total_rx_packets++;

		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
			   I40E_RXD_QW1_PTYPE_SHIFT;
		i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);

		vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
			   le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
		i40e_receive_skb(rx_ring, skb, vlan_tag);
	}

	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
	return failure ? budget : (int)total_rx_packets;
}

/**
 * i40e_xmit_zc - Performs zero-copy Tx AF_XDP
 * @xdp_ring: XDP Tx ring
 * @budget: NAPI budget
 *
 * Returns true if the work is finished.
 **/
static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
{
	struct i40e_tx_desc *tx_desc = NULL;
	struct i40e_tx_buffer *tx_bi;
	bool work_done = true;
	dma_addr_t dma;
	u32 len;

	while (budget-- > 0) {
		if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) {
			xdp_ring->tx_stats.tx_busy++;
			work_done = false;
			break;
		}

		if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &dma, &len))
			break;

		dma_sync_single_for_device(xdp_ring->dev, dma, len,
					   DMA_BIDIRECTIONAL);

		tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use];
		tx_bi->bytecount = len;

		tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use);
		tx_desc->buffer_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz =
			build_ctob(I40E_TX_DESC_CMD_ICRC
				   | I40E_TX_DESC_CMD_EOP,
				   0, len, 0);

		xdp_ring->next_to_use++;
		if (xdp_ring->next_to_use == xdp_ring->count)
			xdp_ring->next_to_use = 0;
	}

	if (tx_desc) {
		/* Request an interrupt for the last frame and bump tail ptr. */
		tx_desc->cmd_type_offset_bsz |= (I40E_TX_DESC_CMD_RS <<
						 I40E_TXD_QW1_CMD_SHIFT);
		i40e_xdp_ring_update_tail(xdp_ring);

		xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
	}

	return !!budget && work_done;
}

/**
 * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry
 * @tx_ring: XDP Tx ring
 * @tx_bi: Tx buffer info to clean
 **/
static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
				     struct i40e_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}

/**
 * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries
 * @vsi: Current VSI
 * @tx_ring: XDP Tx ring
 * @napi_budget: NAPI budget
 *
 * Returns true if cleanup/transmission is done.
 **/
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
			   struct i40e_ring *tx_ring, int napi_budget)
{
	unsigned int ntc, total_bytes = 0, budget = vsi->work_limit;
	u32 i, completed_frames, frames_ready, xsk_frames = 0;
	struct xdp_umem *umem = tx_ring->xsk_umem;
	u32 head_idx = i40e_get_head(tx_ring);
	bool work_done = true, xmit_done;
	struct i40e_tx_buffer *tx_bi;

	if (head_idx < tx_ring->next_to_clean)
		head_idx += tx_ring->count;
	frames_ready = head_idx - tx_ring->next_to_clean;

	if (frames_ready == 0) {
		goto out_xmit;
	} else if (frames_ready > budget) {
		completed_frames = budget;
		work_done = false;
	} else {
		completed_frames = frames_ready;
	}

	ntc = tx_ring->next_to_clean;

	for (i = 0; i < completed_frames; i++) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf)
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;
		total_bytes += tx_bi->bytecount;

		if (++ntc >= tx_ring->count)
			ntc = 0;
	}

	tx_ring->next_to_clean += completed_frames;
	if (unlikely(tx_ring->next_to_clean >= tx_ring->count))
		tx_ring->next_to_clean -= tx_ring->count;

	if (xsk_frames)
		xsk_umem_complete_tx(umem, xsk_frames);

	i40e_arm_wb(tx_ring, vsi, budget);
	i40e_update_tx_stats(tx_ring, completed_frames, total_bytes);

out_xmit:
	xmit_done = i40e_xmit_zc(tx_ring, budget);

	return work_done && xmit_done;
}

/**
 * i40e_xsk_async_xmit - Implements the ndo_xsk_async_xmit
 * @dev: the netdevice
 * @queue_id: queue id to wake up
 *
 * Returns <0 for errors, 0 otherwise.
 **/
int i40e_xsk_async_xmit(struct net_device *dev, u32 queue_id)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_ring *ring;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!i40e_enabled_xdp_vsi(vsi))
		return -ENXIO;

	if (queue_id >= vsi->num_queue_pairs)
		return -ENXIO;

	if (!vsi->xdp_rings[queue_id]->xsk_umem)
		return -ENXIO;

	ring = vsi->xdp_rings[queue_id];

	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	 */
	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
		i40e_force_wb(vsi, ring->q_vector);

	return 0;
}

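/**
 * i40e_xsk_clean_rx_ring - Clean the Rx ring on shutdown
 * @rx_ring: Rx ring
 *
 * Returns any zero-copy buffers still held by the ring to the UMEM's
 * fill queue reuse list.
 **/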
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
{
	u16 i;

	for (i = 0; i < rx_ring->count; i++) {
		struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];

		if (!rx_bi->addr)
			continue;

		xsk_umem_fq_reuse(rx_ring->xsk_umem, rx_bi->handle);
		rx_bi->addr = NULL;
	}
}

/**
 * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
 * @tx_ring: XDP Tx ring
 **/
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xdp_umem *umem = tx_ring->xsk_umem;
	struct i40e_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf)
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc >= tx_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_umem_complete_tx(umem, xsk_frames);
}

/**
 * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have AF_XDP UMEM attached
 * @vsi: vsi
 *
 * Returns true if any of the Rx rings has an AF_XDP UMEM attached
 **/
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
{
	int i;

	if (!vsi->xsk_umems)
		return false;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (vsi->xsk_umems[i])
			return true;
	}

	return false;
}