// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include "gve.h"
#include "gve_adminq.h"
#include "gve_utils.h"
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <net/xdp.h>
#include <net/xdp_sock_drv.h>
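
/* Unmap and release a single receive buffer page. The extra pagecnt_bias
 * references taken when the buffer was posted are dropped before the page
 * itself is unmapped and freed.
 */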
static void gve_rx_free_buffer(struct device *dev,
			       struct gve_rx_slot_page_info *page_info,
			       union gve_rx_data_slot *data_slot)
{
	dma_addr_t dma = (dma_addr_t)(be64_to_cpu(data_slot->addr) &
				      GVE_DATA_SLOT_ADDR_PAGE_MASK);

	page_ref_sub(page_info->page, page_info->pagecnt_bias - 1);
	gve_free_page(dev, page_info->page, dma, DMA_FROM_DEVICE);
}
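
/* Release every buffer page still posted to the ring. In raw addressing
 * (RDA) mode each page is unmapped and freed; in QPL mode only the bias
 * references are dropped (the queue page list is freed separately) and the
 * copy pool pages are released as well.
 */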
static void gve_rx_unfill_pages(struct gve_priv *priv,
				struct gve_rx_ring *rx,
				struct gve_rx_alloc_rings_cfg *cfg)
{
	u32 slots = rx->mask + 1;
	int i;

	if (!rx->data.page_info)
		return;

	if (rx->data.raw_addressing) {
		for (i = 0; i < slots; i++)
			gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i],
					   &rx->data.data_ring[i]);
	} else {
		for (i = 0; i < slots; i++)
			page_ref_sub(rx->data.page_info[i].page,
				     rx->data.page_info[i].pagecnt_bias - 1);

		for (i = 0; i < rx->qpl_copy_pool_mask + 1; i++) {
			page_ref_sub(rx->qpl_copy_pool[i].page,
				     rx->qpl_copy_pool[i].pagecnt_bias - 1);
			put_page(rx->qpl_copy_pool[i].page);
		}
	}
	kvfree(rx->data.page_info);
	rx->data.page_info = NULL;
}

static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
{
	ctx->skb_head = NULL;
	ctx->skb_tail = NULL;
	ctx->total_size = 0;
	ctx->frag_cnt = 0;
	ctx->drop_pkt = false;
}

static void gve_rx_init_ring_state_gqi(struct gve_rx_ring *rx)
{
	rx->desc.seqno = 1;
	rx->cnt = 0;
	gve_rx_ctx_clear(&rx->ctx);
}

static void gve_rx_reset_ring_gqi(struct gve_priv *priv, int idx)
{
	struct gve_rx_ring *rx = &priv->rx[idx];
	const u32 slots = priv->rx_desc_cnt;
	size_t size;

	/* Reset desc ring */
	if (rx->desc.desc_ring) {
		size = slots * sizeof(rx->desc.desc_ring[0]);
		memset(rx->desc.desc_ring, 0, size);
	}

	/* Reset q_resources */
	if (rx->q_resources)
		memset(rx->q_resources, 0, sizeof(*rx->q_resources));

	gve_rx_init_ring_state_gqi(rx);
}

void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx)
{
	int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);

	if (!gve_rx_was_added_to_block(priv, idx))
		return;

	gve_remove_napi(priv, ntfy_idx);
	gve_rx_remove_from_block(priv, idx);
	gve_rx_reset_ring_gqi(priv, idx);
}

void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
			  struct gve_rx_alloc_rings_cfg *cfg)
{
	struct device *dev = &priv->pdev->dev;
	u32 slots = rx->mask + 1;
	int idx = rx->q_num;
	size_t bytes;
	u32 qpl_id;

	if (rx->desc.desc_ring) {
		bytes = sizeof(struct gve_rx_desc) * cfg->ring_size;
		dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus);
		rx->desc.desc_ring = NULL;
	}

	if (rx->q_resources) {
		dma_free_coherent(dev, sizeof(*rx->q_resources),
				  rx->q_resources, rx->q_resources_bus);
		rx->q_resources = NULL;
	}

	gve_rx_unfill_pages(priv, rx, cfg);

	if (rx->data.data_ring) {
		bytes = sizeof(*rx->data.data_ring) * slots;
		dma_free_coherent(dev, bytes, rx->data.data_ring,
				  rx->data.data_bus);
		rx->data.data_ring = NULL;
	}

	kvfree(rx->qpl_copy_pool);
	rx->qpl_copy_pool = NULL;

	if (rx->data.qpl) {
		qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, idx);
		gve_free_queue_page_list(priv, rx->data.qpl, qpl_id);
		rx->data.qpl = NULL;
	}

	netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}
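
/* Point a ring slot at a page and take a large pagecnt_bias of extra
 * references up front, so halves of the page can be handed to the stack
 * cheaply and page_count() can later be compared against the bias to tell
 * whether the stack still owns part of the page.
 */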
static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info,
				dma_addr_t addr, struct page *page, __be64 *slot_addr)
{
	page_info->page = page;
	page_info->page_offset = 0;
	page_info->page_address = page_address(page);
	*slot_addr = cpu_to_be64(addr);
	/* The page already has 1 ref */
	page_ref_add(page, INT_MAX - 1);
	page_info->pagecnt_bias = INT_MAX;
}

static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
			       struct gve_rx_slot_page_info *page_info,
			       union gve_rx_data_slot *data_slot,
			       struct gve_rx_ring *rx)
{
	struct page *page;
	dma_addr_t dma;
	int err;

	err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE,
			     GFP_ATOMIC);
	if (err) {
		u64_stats_update_begin(&rx->statss);
		rx->rx_buf_alloc_fail++;
		u64_stats_update_end(&rx->statss);
		return err;
	}

	gve_setup_rx_buffer(page_info, dma, page, &data_slot->addr);
	return 0;
}
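
/* Post one page per ring slot. In QPL mode the pre-registered queue pages
 * are used and the copy pool is populated; in raw addressing mode fresh
 * pages are allocated and DMA mapped. Returns the number of slots filled or
 * a negative errno.
 */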
static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
				struct gve_rx_alloc_rings_cfg *cfg)
{
	struct gve_priv *priv = rx->gve;
	u32 slots;
	int err;
	int i;
	int j;

	/* Allocate one page per Rx queue slot. Each page is split into two
	 * packet buffers, when possible we "page flip" between the two.
	 */
	slots = rx->mask + 1;

	rx->data.page_info = kvzalloc(slots *
				      sizeof(*rx->data.page_info), GFP_KERNEL);
	if (!rx->data.page_info)
		return -ENOMEM;

	for (i = 0; i < slots; i++) {
		if (!rx->data.raw_addressing) {
			struct page *page = rx->data.qpl->pages[i];
			dma_addr_t addr = i * PAGE_SIZE;

			gve_setup_rx_buffer(&rx->data.page_info[i], addr, page,
					    &rx->data.data_ring[i].qpl_offset);
			continue;
		}
		err = gve_rx_alloc_buffer(priv, &priv->pdev->dev,
					  &rx->data.page_info[i],
					  &rx->data.data_ring[i], rx);
		if (err)
			goto alloc_err_rda;
	}

	if (!rx->data.raw_addressing) {
		for (j = 0; j < rx->qpl_copy_pool_mask + 1; j++) {
			struct page *page = alloc_page(GFP_KERNEL);

			if (!page) {
				err = -ENOMEM;
				goto alloc_err_qpl;
			}

			rx->qpl_copy_pool[j].page = page;
			rx->qpl_copy_pool[j].page_offset = 0;
			rx->qpl_copy_pool[j].page_address = page_address(page);

			/* The page already has 1 ref. */
			page_ref_add(page, INT_MAX - 1);
			rx->qpl_copy_pool[j].pagecnt_bias = INT_MAX;
		}
	}

	return slots;

alloc_err_qpl:
	/* Fully free the copy pool pages. */
	while (j--) {
		page_ref_sub(rx->qpl_copy_pool[j].page,
			     rx->qpl_copy_pool[j].pagecnt_bias - 1);
		put_page(rx->qpl_copy_pool[j].page);
	}

	/* Do not fully free QPL pages - only remove the bias added in this
	 * function with gve_setup_rx_buffer.
	 */
	while (i--)
		page_ref_sub(rx->data.page_info[i].page,
			     rx->data.page_info[i].pagecnt_bias - 1);

	return err;

alloc_err_rda:
	while (i--)
		gve_rx_free_buffer(&priv->pdev->dev,
				   &rx->data.page_info[i],
				   &rx->data.data_ring[i]);
	return err;
}

void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx)
{
	int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);

	gve_rx_add_to_block(priv, idx);
	gve_add_napi(priv, ntfy_idx, gve_napi_poll);
}
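
/* Allocate the descriptor ring, data slot ring, copy pool, queue resources
 * and buffer pages for one GQI receive queue.
 */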
int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
			  struct gve_rx_alloc_rings_cfg *cfg,
			  struct gve_rx_ring *rx,
			  int idx)
{
	struct device *hdev = &priv->pdev->dev;
	u32 slots = cfg->ring_size;
	int filled_pages;
	u32 qpl_page_cnt;
	u32 qpl_id = 0;
	size_t bytes;
	int err;

	netif_dbg(priv, drv, priv->dev, "allocating rx ring\n");
	/* Make sure everything is zeroed to start with */
	memset(rx, 0, sizeof(*rx));

	rx->gve = priv;
	rx->q_num = idx;

	rx->mask = slots - 1;
	rx->data.raw_addressing = cfg->raw_addressing;

	/* alloc rx data ring */
	bytes = sizeof(*rx->data.data_ring) * slots;
	rx->data.data_ring = dma_alloc_coherent(hdev, bytes,
						&rx->data.data_bus,
						GFP_KERNEL);
	if (!rx->data.data_ring)
		return -ENOMEM;

	rx->qpl_copy_pool_mask = min_t(u32, U32_MAX, slots * 2) - 1;
	rx->qpl_copy_pool_head = 0;
	rx->qpl_copy_pool = kvcalloc(rx->qpl_copy_pool_mask + 1,
				     sizeof(rx->qpl_copy_pool[0]),
				     GFP_KERNEL);

	if (!rx->qpl_copy_pool) {
		err = -ENOMEM;
		goto abort_with_slots;
	}

	if (!rx->data.raw_addressing) {
		qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
		qpl_page_cnt = cfg->ring_size;

		rx->data.qpl = gve_alloc_queue_page_list(priv, qpl_id,
							 qpl_page_cnt);
		if (!rx->data.qpl) {
			err = -ENOMEM;
			goto abort_with_copy_pool;
		}
	}

	filled_pages = gve_rx_prefill_pages(rx, cfg);
	if (filled_pages < 0) {
		err = -ENOMEM;
		goto abort_with_qpl;
	}
	rx->fill_cnt = filled_pages;
	/* Ensure data ring slots (packet buffers) are visible. */
	dma_wmb();

	/* Alloc gve_queue_resources */
	rx->q_resources =
		dma_alloc_coherent(hdev,
				   sizeof(*rx->q_resources),
				   &rx->q_resources_bus,
				   GFP_KERNEL);
	if (!rx->q_resources) {
		err = -ENOMEM;
		goto abort_filled;
	}
	netif_dbg(priv, drv, priv->dev, "rx[%d]->data.data_bus=%lx\n", idx,
		  (unsigned long)rx->data.data_bus);

	/* alloc rx desc ring */
	bytes = sizeof(struct gve_rx_desc) * cfg->ring_size;
	rx->desc.desc_ring = dma_alloc_coherent(hdev, bytes, &rx->desc.bus,
						GFP_KERNEL);
	if (!rx->desc.desc_ring) {
		err = -ENOMEM;
		goto abort_with_q_resources;
	}
	rx->db_threshold = slots / 2;
	gve_rx_init_ring_state_gqi(rx);

	rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
	gve_rx_ctx_clear(&rx->ctx);

	return 0;

abort_with_q_resources:
	dma_free_coherent(hdev, sizeof(*rx->q_resources),
			  rx->q_resources, rx->q_resources_bus);
	rx->q_resources = NULL;
abort_filled:
	gve_rx_unfill_pages(priv, rx, cfg);
abort_with_qpl:
	if (!rx->data.raw_addressing) {
		gve_free_queue_page_list(priv, rx->data.qpl, qpl_id);
		rx->data.qpl = NULL;
	}
abort_with_copy_pool:
	kvfree(rx->qpl_copy_pool);
	rx->qpl_copy_pool = NULL;
abort_with_slots:
	bytes = sizeof(*rx->data.data_ring) * slots;
	dma_free_coherent(hdev, bytes, rx->data.data_ring, rx->data.data_bus);
	rx->data.data_ring = NULL;

	return err;
}

int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
			   struct gve_rx_alloc_rings_cfg *cfg)
{
	struct gve_rx_ring *rx;
	int err = 0;
	int i, j;

	rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
		      GFP_KERNEL);
	if (!rx)
		return -ENOMEM;

	for (i = 0; i < cfg->qcfg->num_queues; i++) {
		err = gve_rx_alloc_ring_gqi(priv, cfg, &rx[i], i);
		if (err) {
			netif_err(priv, drv, priv->dev,
				  "Failed to alloc rx ring=%d: err=%d\n",
				  i, err);
			goto cleanup;
		}
	}

	cfg->rx = rx;
	return 0;

cleanup:
	for (j = 0; j < i; j++)
		gve_rx_free_ring_gqi(priv, &rx[j], cfg);
	kvfree(rx);
	return err;
}

void gve_rx_free_rings_gqi(struct gve_priv *priv,
			   struct gve_rx_alloc_rings_cfg *cfg)
{
	struct gve_rx_ring *rx = cfg->rx;
	int i;

	if (!rx)
		return;

	for (i = 0; i < cfg->qcfg->num_queues; i++)
		gve_rx_free_ring_gqi(priv, &rx[i], cfg);

	kvfree(rx);
	cfg->rx = NULL;
}

void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx)
{
	u32 db_idx = be32_to_cpu(rx->q_resources->db_index);

	iowrite32be(rx->fill_cnt, &priv->db_bar2[db_idx]);
}

static enum pkt_hash_types gve_rss_type(__be16 pkt_flags)
{
	if (likely(pkt_flags & (GVE_RXF_TCP | GVE_RXF_UDP)))
		return PKT_HASH_TYPE_L4;
	if (pkt_flags & (GVE_RXF_IPV4 | GVE_RXF_IPV6))
		return PKT_HASH_TYPE_L3;
	return PKT_HASH_TYPE_L2;
}
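
/* Append one receive buffer fragment to the packet being assembled in
 * rx->ctx: the first fragment starts a new skb via napi_get_frags(), and
 * once MAX_SKB_FRAGS is reached a fresh skb is chained through frag_list.
 */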
static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi,
					struct gve_rx_slot_page_info *page_info,
					unsigned int truesize, u16 len,
					struct gve_rx_ctx *ctx)
{
	u32 offset = page_info->page_offset + page_info->pad;
	struct sk_buff *skb = ctx->skb_tail;
	int num_frags = 0;

	if (!skb) {
		skb = napi_get_frags(napi);
		if (unlikely(!skb))
			return NULL;

		ctx->skb_head = skb;
		ctx->skb_tail = skb;
	} else {
		num_frags = skb_shinfo(ctx->skb_tail)->nr_frags;
		if (num_frags == MAX_SKB_FRAGS) {
			skb = napi_alloc_skb(napi, 0);
			if (!skb)
				return NULL;

			// We will never chain more than two SKBs: 2 * 16 * 2k > 64k
			// which is why we do not need to chain by using skb->next
			skb_shinfo(ctx->skb_tail)->frag_list = skb;

			ctx->skb_tail = skb;
			num_frags = 0;
		}
	}

	if (skb != ctx->skb_head) {
		ctx->skb_head->len += len;
		ctx->skb_head->data_len += len;
		ctx->skb_head->truesize += truesize;
	}
	skb_add_rx_frag(skb, num_frags, page_info->page,
			offset, len, truesize);

	return ctx->skb_head;
}

static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, __be64 *slot_addr)
{
	const __be64 offset = cpu_to_be64(GVE_DEFAULT_RX_BUFFER_OFFSET);

	/* "flip" to other packet buffer on this page */
	page_info->page_offset ^= GVE_DEFAULT_RX_BUFFER_OFFSET;
	*(slot_addr) ^= offset;
}
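
/* Returns 1 if the page is owned solely by the driver (page_count equals the
 * bias), 0 if the stack still holds a reference, and -1 on an impossible
 * refcount, which callers treat as grounds for scheduling a reset.
 */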
static int gve_rx_can_recycle_buffer(struct gve_rx_slot_page_info *page_info)
{
	int pagecount = page_count(page_info->page);

	/* This page is not being used by any SKBs - reuse */
	if (pagecount == page_info->pagecnt_bias)
		return 1;
	/* This page is still being used by an SKB - we can't reuse */
	else if (pagecount > page_info->pagecnt_bias)
		return 0;
	WARN(pagecount < page_info->pagecnt_bias,
	     "Pagecount should never be less than the bias.");
	return -1;
}
static struct sk_buff *
gve_rx_raw_addressing(struct device *dev, struct net_device *netdev,
		      struct gve_rx_slot_page_info *page_info, u16 len,
		      struct napi_struct *napi,
		      union gve_rx_data_slot *data_slot,
		      u16 packet_buffer_size, struct gve_rx_ctx *ctx)
{
	struct sk_buff *skb = gve_rx_add_frags(napi, page_info, packet_buffer_size, len, ctx);

	if (!skb)
		return NULL;

	/* Optimistically stop the kernel from freeing the page.
	 * We will check again in refill to determine if we need to alloc a
	 * new page.
	 */
	gve_dec_pagecnt_bias(page_info);

	return skb;
}
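
/* QPL-mode fallback when a registered buffer cannot be flipped: copy the
 * payload into a page from the ring's copy pool (or into a freshly allocated
 * page if the least recently used pool entry is still held by the stack) and
 * build the skb from that copy.
 */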
static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx,
					   struct gve_rx_slot_page_info *page_info,
					   u16 len, struct napi_struct *napi)
{
	u32 pool_idx = rx->qpl_copy_pool_head & rx->qpl_copy_pool_mask;
	void *src = page_info->page_address + page_info->page_offset;
	struct gve_rx_slot_page_info *copy_page_info;
	struct gve_rx_ctx *ctx = &rx->ctx;
	bool alloc_page = false;
	struct sk_buff *skb;
	void *dst;

	copy_page_info = &rx->qpl_copy_pool[pool_idx];
	if (!copy_page_info->can_flip) {
		int recycle = gve_rx_can_recycle_buffer(copy_page_info);

		if (unlikely(recycle < 0)) {
			gve_schedule_reset(rx->gve);
			return NULL;
		}
		alloc_page = !recycle;
	}

	if (alloc_page) {
		struct gve_rx_slot_page_info alloc_page_info;
		struct page *page;

		/* The least recently used page turned out to be
		 * still in use by the kernel. Ignoring it and moving
		 * on alleviates head-of-line blocking.
		 */
		rx->qpl_copy_pool_head++;

		page = alloc_page(GFP_ATOMIC);
		if (!page)
			return NULL;

		alloc_page_info.page = page;
		alloc_page_info.page_offset = 0;
		alloc_page_info.page_address = page_address(page);
		alloc_page_info.pad = page_info->pad;

		memcpy(alloc_page_info.page_address, src, page_info->pad + len);
		skb = gve_rx_add_frags(napi, &alloc_page_info,
				       PAGE_SIZE,
				       len, ctx);

		u64_stats_update_begin(&rx->statss);
		rx->rx_frag_copy_cnt++;
		rx->rx_frag_alloc_cnt++;
		u64_stats_update_end(&rx->statss);

		return skb;
	}

	dst = copy_page_info->page_address + copy_page_info->page_offset;
	memcpy(dst, src, page_info->pad + len);
	copy_page_info->pad = page_info->pad;

	skb = gve_rx_add_frags(napi, copy_page_info,
			       rx->packet_buffer_size, len, ctx);
	if (unlikely(!skb))
		return NULL;

	gve_dec_pagecnt_bias(copy_page_info);
	copy_page_info->page_offset ^= GVE_DEFAULT_RX_BUFFER_OFFSET;

	if (copy_page_info->can_flip) {
		/* We have used both halves of this copy page, it
		 * is time for it to go to the back of the queue.
		 */
		copy_page_info->can_flip = false;
		rx->qpl_copy_pool_head++;
		prefetch(rx->qpl_copy_pool[rx->qpl_copy_pool_head & rx->qpl_copy_pool_mask].page);
	} else {
		copy_page_info->can_flip = true;
	}

	u64_stats_update_begin(&rx->statss);
	rx->rx_frag_copy_cnt++;
	u64_stats_update_end(&rx->statss);

	return skb;
}

static struct sk_buff *
gve_rx_qpl(struct device *dev, struct net_device *netdev,
	   struct gve_rx_ring *rx, struct gve_rx_slot_page_info *page_info,
	   u16 len, struct napi_struct *napi,
	   union gve_rx_data_slot *data_slot)
{
	struct gve_rx_ctx *ctx = &rx->ctx;
	struct sk_buff *skb;

	/* if raw_addressing mode is not enabled gvnic can only receive into
	 * registered segments. If the buffer can't be recycled, our only
	 * choice is to copy the data out of it so that we can return it to the
	 * device.
	 */
	if (page_info->can_flip) {
		skb = gve_rx_add_frags(napi, page_info, rx->packet_buffer_size, len, ctx);
		/* No point in recycling if we didn't get the skb */
		if (skb) {
			/* Make sure that the page isn't freed. */
			gve_dec_pagecnt_bias(page_info);
			gve_rx_flip_buff(page_info, &data_slot->qpl_offset);
		}
	} else {
		skb = gve_rx_copy_to_pool(rx, page_info, len, napi);
	}
	return skb;
}
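
/* Build an skb for one fragment: small single-fragment packets are copied
 * into a freshly allocated skb via gve_rx_copy(), everything else is
 * attached by reference through the raw addressing or QPL path.
 */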
static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
				  struct gve_rx_slot_page_info *page_info, struct napi_struct *napi,
				  u16 len, union gve_rx_data_slot *data_slot,
				  bool is_only_frag)
{
	struct net_device *netdev = priv->dev;
	struct gve_rx_ctx *ctx = &rx->ctx;
	struct sk_buff *skb = NULL;

	if (len <= priv->rx_copybreak && is_only_frag) {
		/* Just copy small packets */
		skb = gve_rx_copy(netdev, napi, page_info, len);
		if (skb) {
			u64_stats_update_begin(&rx->statss);
			rx->rx_copied_pkt++;
			rx->rx_frag_copy_cnt++;
			rx->rx_copybreak_pkt++;
			u64_stats_update_end(&rx->statss);
		}
	} else {
		int recycle = gve_rx_can_recycle_buffer(page_info);

		if (unlikely(recycle < 0)) {
			gve_schedule_reset(priv);
			return NULL;
		}
		page_info->can_flip = recycle;
		if (page_info->can_flip) {
			u64_stats_update_begin(&rx->statss);
			rx->rx_frag_flip_cnt++;
			u64_stats_update_end(&rx->statss);
		}

		if (rx->data.raw_addressing) {
			skb = gve_rx_raw_addressing(&priv->pdev->dev, netdev,
						    page_info, len, napi,
						    data_slot,
						    rx->packet_buffer_size, ctx);
		} else {
			skb = gve_rx_qpl(&priv->pdev->dev, netdev, rx,
					 page_info, len, napi, data_slot);
		}
	}
	return skb;
}
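
/* XDP_REDIRECT helper for AF_XDP: copy the frame into an xsk_buff taken from
 * the socket's pool before redirecting it.
 */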
static int gve_xsk_pool_redirect(struct net_device *dev,
				 struct gve_rx_ring *rx,
				 void *data, int len,
				 struct bpf_prog *xdp_prog)
{
	struct xdp_buff *xdp;
	int err;

	if (rx->xsk_pool->frame_len < len)
		return -E2BIG;
	xdp = xsk_buff_alloc(rx->xsk_pool);
	if (!xdp) {
		u64_stats_update_begin(&rx->statss);
		rx->xdp_alloc_fails++;
		u64_stats_update_end(&rx->statss);
		return -ENOMEM;
	}
	xdp->data_end = xdp->data + len;
	memcpy(xdp->data, data, len);
	err = xdp_do_redirect(dev, xdp, xdp_prog);
	if (err)
		xsk_buff_free(xdp);
	return err;
}

static int gve_xdp_redirect(struct net_device *dev, struct gve_rx_ring *rx,
			    struct xdp_buff *orig, struct bpf_prog *xdp_prog)
{
	int total_len, len = orig->data_end - orig->data;
	int headroom = XDP_PACKET_HEADROOM;
	struct xdp_buff new;
	void *frame;
	int err;

	if (rx->xsk_pool)
		return gve_xsk_pool_redirect(dev, rx, orig->data,
					     len, xdp_prog);

	total_len = headroom + SKB_DATA_ALIGN(len) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	frame = page_frag_alloc(&rx->page_cache, total_len, GFP_ATOMIC);
	if (!frame) {
		u64_stats_update_begin(&rx->statss);
		rx->xdp_alloc_fails++;
		u64_stats_update_end(&rx->statss);
		return -ENOMEM;
	}
	xdp_init_buff(&new, total_len, &rx->xdp_rxq);
	xdp_prepare_buff(&new, frame, headroom, len, false);
	memcpy(new.data, orig->data, len);

	err = xdp_do_redirect(dev, &new, xdp_prog);
	if (err)
		page_frag_free(frame);

	return err;
}
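
/* Act on a non-XDP_PASS verdict: transmit on the paired XDP TX queue,
 * redirect via a copied frame, or drop, and account the action in the
 * per-ring stats.
 */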
static void gve_xdp_done(struct gve_priv *priv, struct gve_rx_ring *rx,
			 struct xdp_buff *xdp, struct bpf_prog *xprog,
			 int xdp_act)
{
	struct gve_tx_ring *tx;
	int tx_qid;
	int err;

	switch (xdp_act) {
	case XDP_ABORTED:
	case XDP_DROP:
	default:
		break;
	case XDP_TX:
		tx_qid = gve_xdp_tx_queue_id(priv, rx->q_num);
		tx = &priv->tx[tx_qid];
		spin_lock(&tx->xdp_lock);
		err = gve_xdp_xmit_one(priv, tx, xdp->data,
				       xdp->data_end - xdp->data, NULL);
		spin_unlock(&tx->xdp_lock);

		if (unlikely(err)) {
			u64_stats_update_begin(&rx->statss);
			rx->xdp_tx_errors++;
			u64_stats_update_end(&rx->statss);
		}
		break;
	case XDP_REDIRECT:
		err = gve_xdp_redirect(priv->dev, rx, xdp, xprog);

		if (unlikely(err)) {
			u64_stats_update_begin(&rx->statss);
			rx->xdp_redirect_errors++;
			u64_stats_update_end(&rx->statss);
		}
		break;
	}
	u64_stats_update_begin(&rx->statss);
	if ((u32)xdp_act < GVE_XDP_ACTIONS)
		rx->xdp_actions[xdp_act]++;
	u64_stats_update_end(&rx->statss);
}
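
/* Process a single receive descriptor: run XDP when a program is attached
 * and the packet fits in one buffer, otherwise build (or extend) the skb and
 * hand completed packets to GRO. State for multi-fragment packets is carried
 * across descriptors in rx->ctx.
 */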
#define GVE_PKTCONT_BIT_IS_SET(x) (GVE_RXF_PKT_CONT & (x))
static void gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
		   struct gve_rx_desc *desc, u32 idx,
		   struct gve_rx_cnts *cnts)
{
	bool is_last_frag = !GVE_PKTCONT_BIT_IS_SET(desc->flags_seq);
	struct gve_rx_slot_page_info *page_info;
	u16 frag_size = be16_to_cpu(desc->len);
	struct gve_rx_ctx *ctx = &rx->ctx;
	union gve_rx_data_slot *data_slot;
	struct gve_priv *priv = rx->gve;
	struct sk_buff *skb = NULL;
	struct bpf_prog *xprog;
	struct xdp_buff xdp;
	dma_addr_t page_bus;
	void *va;

	u16 len = frag_size;
	struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
	bool is_first_frag = ctx->frag_cnt == 0;

	bool is_only_frag = is_first_frag && is_last_frag;

	if (unlikely(ctx->drop_pkt))
		goto finish_frag;

	if (desc->flags_seq & GVE_RXF_ERR) {
		ctx->drop_pkt = true;
		cnts->desc_err_pkt_cnt++;
		napi_free_frags(napi);
		goto finish_frag;
	}

	if (unlikely(frag_size > rx->packet_buffer_size)) {
		netdev_warn(priv->dev, "Unexpected frag size %d, can't exceed %d, scheduling reset",
			    frag_size, rx->packet_buffer_size);
		ctx->drop_pkt = true;
		napi_free_frags(napi);
		gve_schedule_reset(rx->gve);
		goto finish_frag;
	}

	/* Prefetch two packet buffers ahead, we will need it soon. */
	page_info = &rx->data.page_info[(idx + 2) & rx->mask];
	va = page_info->page_address + page_info->page_offset;
	prefetch(page_info->page); /* Kernel page struct. */
	prefetch(va);              /* Packet header. */
	prefetch(va + 64);         /* Next cacheline too. */

	page_info = &rx->data.page_info[idx];
	data_slot = &rx->data.data_ring[idx];
	page_bus = (rx->data.raw_addressing) ?
		be64_to_cpu(data_slot->addr) - page_info->page_offset :
		rx->data.qpl->page_buses[idx];
	dma_sync_single_for_cpu(&priv->pdev->dev, page_bus,
				PAGE_SIZE, DMA_FROM_DEVICE);
	page_info->pad = is_first_frag ? GVE_RX_PAD : 0;
	len -= page_info->pad;
	frag_size -= page_info->pad;

	xprog = READ_ONCE(priv->xdp_prog);
	if (xprog && is_only_frag) {
		void *old_data;
		int xdp_act;

		xdp_init_buff(&xdp, rx->packet_buffer_size, &rx->xdp_rxq);
		xdp_prepare_buff(&xdp, page_info->page_address +
				 page_info->page_offset, GVE_RX_PAD,
				 len, false);
		old_data = xdp.data;
		xdp_act = bpf_prog_run_xdp(xprog, &xdp);
		if (xdp_act != XDP_PASS) {
			gve_xdp_done(priv, rx, &xdp, xprog, xdp_act);
			ctx->total_size += frag_size;
			goto finish_ok_pkt;
		}

		page_info->pad += xdp.data - old_data;
		len = xdp.data_end - xdp.data;

		u64_stats_update_begin(&rx->statss);
		rx->xdp_actions[XDP_PASS]++;
		u64_stats_update_end(&rx->statss);
	}

	skb = gve_rx_skb(priv, rx, page_info, napi, len,
			 data_slot, is_only_frag);
	if (!skb) {
		u64_stats_update_begin(&rx->statss);
		rx->rx_skb_alloc_fail++;
		u64_stats_update_end(&rx->statss);

		napi_free_frags(napi);
		ctx->drop_pkt = true;
		goto finish_frag;
	}
	ctx->total_size += frag_size;

	if (is_first_frag) {
		if (likely(feat & NETIF_F_RXCSUM)) {
			/* NIC passes up the partial sum */
			if (desc->csum)
				skb->ip_summed = CHECKSUM_COMPLETE;
			else
				skb->ip_summed = CHECKSUM_NONE;
			skb->csum = csum_unfold(desc->csum);
		}

		/* parse flags & pass relevant info up */
		if (likely(feat & NETIF_F_RXHASH) &&
		    gve_needs_rss(desc->flags_seq))
			skb_set_hash(skb, be32_to_cpu(desc->rss_hash),
				     gve_rss_type(desc->flags_seq));
	}

	if (is_last_frag) {
		skb_record_rx_queue(skb, rx->q_num);

		if (skb_is_nonlinear(skb))
			napi_gro_frags(napi);
		else
			napi_gro_receive(napi, skb);

		goto finish_ok_pkt;
	}

	goto finish_frag;

finish_ok_pkt:
	cnts->ok_pkt_bytes += ctx->total_size;
	cnts->ok_pkt_cnt++;
finish_frag:
	ctx->frag_cnt++;
	if (is_last_frag) {
		cnts->total_pkt_cnt++;
		cnts->cont_pkt_cnt += (ctx->frag_cnt > 1);
		gve_rx_ctx_clear(ctx);
	}
}

bool gve_rx_work_pending(struct gve_rx_ring *rx)
{
	struct gve_rx_desc *desc;
	__be16 flags_seq;
	u32 next_idx;

	next_idx = rx->cnt & rx->mask;
	desc = rx->desc.desc_ring + next_idx;

	flags_seq = desc->flags_seq;

	return (GVE_SEQNO(flags_seq) == rx->desc.seqno);
}
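
/* Repost buffers in raw addressing mode: flip to the free half of a page
 * when possible, reuse a fully idle page in place, or unmap and replace a
 * page the stack still owns. Stops early if a replacement page cannot be
 * allocated and returns false only when an inconsistent page refcount forces
 * a reset.
 */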
static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
{
	int refill_target = rx->mask + 1;
	u32 fill_cnt = rx->fill_cnt;

	while (fill_cnt - rx->cnt < refill_target) {
		struct gve_rx_slot_page_info *page_info;
		u32 idx = fill_cnt & rx->mask;

		page_info = &rx->data.page_info[idx];
		if (page_info->can_flip) {
			/* The other half of the page is free because it was
			 * free when we processed the descriptor. Flip to it.
			 */
			union gve_rx_data_slot *data_slot =
						&rx->data.data_ring[idx];

			gve_rx_flip_buff(page_info, &data_slot->addr);
			page_info->can_flip = 0;
		} else {
			/* It is possible that the networking stack has already
			 * finished processing all outstanding packets in the buffer
			 * and it can be reused.
			 * Flipping is unnecessary here - if the networking stack still
			 * owns half the page it is impossible to tell which half. Either
			 * the whole page is free or it needs to be replaced.
			 */
			int recycle = gve_rx_can_recycle_buffer(page_info);

			if (recycle < 0) {
				if (!rx->data.raw_addressing)
					gve_schedule_reset(priv);
				return false;
			}
			if (!recycle) {
				/* We can't reuse the buffer - alloc a new one*/
				union gve_rx_data_slot *data_slot =
						&rx->data.data_ring[idx];
				struct device *dev = &priv->pdev->dev;

				gve_rx_free_buffer(dev, page_info, data_slot);
				page_info->page = NULL;
				if (gve_rx_alloc_buffer(priv, dev, page_info,
							data_slot, rx))
					break;
			}
		}
		fill_cnt++;
	}
	rx->fill_cnt = fill_cnt;
	return true;
}
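
/* Main receive poll loop: consume descriptors whose sequence number matches
 * the expected one, exceeding the budget only to finish a packet already in
 * flight, then flush pending XDP work, restock buffers and ring the
 * doorbell.
 */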
static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
			     netdev_features_t feat)
{
	u64 xdp_redirects = rx->xdp_actions[XDP_REDIRECT];
	u64 xdp_txs = rx->xdp_actions[XDP_TX];
	struct gve_rx_ctx *ctx = &rx->ctx;
	struct gve_priv *priv = rx->gve;
	struct gve_rx_cnts cnts = {0};
	struct gve_rx_desc *next_desc;
	u32 idx = rx->cnt & rx->mask;
	u32 work_done = 0;

	struct gve_rx_desc *desc = &rx->desc.desc_ring[idx];

	// Exceed budget only if (and till) the inflight packet is consumed.
	while ((GVE_SEQNO(desc->flags_seq) == rx->desc.seqno) &&
	       (work_done < budget || ctx->frag_cnt)) {
		next_desc = &rx->desc.desc_ring[(idx + 1) & rx->mask];
		prefetch(next_desc);

		gve_rx(rx, feat, desc, idx, &cnts);

		rx->cnt++;
		idx = rx->cnt & rx->mask;
		desc = &rx->desc.desc_ring[idx];
		rx->desc.seqno = gve_next_seqno(rx->desc.seqno);
		work_done++;
	}

	// The device will only send whole packets.
	if (unlikely(ctx->frag_cnt)) {
		struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi;

		napi_free_frags(napi);
		gve_rx_ctx_clear(&rx->ctx);
		netdev_warn(priv->dev, "Unexpected seq number %d with incomplete packet, expected %d, scheduling reset",
			    GVE_SEQNO(desc->flags_seq), rx->desc.seqno);
		gve_schedule_reset(rx->gve);
	}

	if (!work_done && rx->fill_cnt - rx->cnt > rx->db_threshold)
		return 0;

	if (work_done) {
		u64_stats_update_begin(&rx->statss);
		rx->rpackets += cnts.ok_pkt_cnt;
		rx->rbytes += cnts.ok_pkt_bytes;
		rx->rx_cont_packet_cnt += cnts.cont_pkt_cnt;
		rx->rx_desc_err_dropped_pkt += cnts.desc_err_pkt_cnt;
		u64_stats_update_end(&rx->statss);
	}

	if (xdp_txs != rx->xdp_actions[XDP_TX])
		gve_xdp_tx_flush(priv, rx->q_num);

	if (xdp_redirects != rx->xdp_actions[XDP_REDIRECT])
		xdp_do_flush();

	/* restock ring slots */
	if (!rx->data.raw_addressing) {
		/* In QPL mode buffs are refilled as the desc are processed */
		rx->fill_cnt += work_done;
	} else if (rx->fill_cnt - rx->cnt <= rx->db_threshold) {
		/* In raw addressing mode buffs are only refilled if the avail
		 * falls below a threshold.
		 */
		if (!gve_rx_refill_buffers(priv, rx))
			return 0;

		/* If we were not able to completely refill buffers, we'll want
		 * to schedule this queue for work again to refill buffers.
		 */
		if (rx->fill_cnt - rx->cnt <= rx->db_threshold) {
			gve_rx_write_doorbell(priv, rx);
			return budget;
		}
	}

	gve_rx_write_doorbell(priv, rx);
	return cnts.total_pkt_cnt;
}

int gve_rx_poll(struct gve_notify_block *block, int budget)
{
	struct gve_rx_ring *rx = block->rx;
	netdev_features_t feat;
	int work_done = 0;

	feat = block->napi.dev->features;

	if (budget > 0)
		work_done = gve_clean_rx_done(rx, budget, feat);

	return work_done;
}