page_pool: split types and declarations from page_pool.h
author Yunsheng Lin <[email protected]>
Fri, 4 Aug 2023 18:05:24 +0000 (20:05 +0200)
committer Jakub Kicinski <[email protected]>
Mon, 7 Aug 2023 20:05:19 +0000 (13:05 -0700)
Split types and pure function declarations from page_pool.h
and add them to page_pool/types.h, so that C sources can
include page_pool/helpers.h and headers should generally only
include page_pool/types.h, as suggested by Jakub.
Rename page_pool.h to page_pool/helpers.h to have both in
one place.
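
As a rough sketch of the resulting convention (the foo.* files are
hypothetical, not part of this commit): headers that only embed the
page pool types include page_pool/types.h, while C sources that call
the inline helpers include page_pool/helpers.h.

/* foo.h -- hypothetical driver header: only the types are needed */
#include <net/page_pool/types.h>

struct foo_rx_ring {
	struct page_pool *pool;	/* declared in page_pool/types.h */
};

/* foo.c -- hypothetical driver source: uses the inline helpers */
#include <net/page_pool/helpers.h>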

Signed-off-by: Yunsheng Lin <[email protected]>
Suggested-by: Jakub Kicinski <[email protected]>
Signed-off-by: Alexander Lobakin <[email protected]>
Reviewed-by: Alexander Duyck <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
[Jakub: change microsoft/mana, fix kdoc paths in Documentation]
Signed-off-by: Jakub Kicinski <[email protected]>
44 files changed:
Documentation/networking/page_pool.rst
MAINTAINERS
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
drivers/net/ethernet/engleder/tsnep_main.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/mvpp2/mvpp2.h
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mediatek/mtk_eth_soc.h
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
drivers/net/ethernet/microchip/lan966x/lan966x_main.h
drivers/net/ethernet/microsoft/mana/mana_en.c
drivers/net/ethernet/socionext/netsec.c
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpsw_new.c
drivers/net/ethernet/ti/cpsw_priv.c
drivers/net/ethernet/wangxun/libwx/wx_lib.c
drivers/net/veth.c
drivers/net/wireless/mediatek/mt76/mac80211.c
drivers/net/wireless/mediatek/mt76/mt76.h
drivers/net/xen-netfront.c
include/linux/skbuff.h
include/net/page_pool.h [deleted file]
include/net/page_pool/helpers.h [new file with mode: 0644]
include/net/page_pool/types.h [new file with mode: 0644]
include/trace/events/page_pool.h
net/bpf/test_run.c
net/core/page_pool.c
net/core/skbuff.c
net/core/xdp.c

index 53b5448cc0f11adc995f8106ccb48f5172ec20ac..68b82cea13e438fcf71620cc9fed35fe68103d56 100644 (file)
@@ -67,10 +67,10 @@ a page will cause no race conditions is enough.
 .. kernel-doc:: net/core/page_pool.c
    :identifiers: page_pool_create
 
-.. kernel-doc:: include/net/page_pool.h
+.. kernel-doc:: include/net/page_pool/types.h
    :identifiers: struct page_pool_params
 
-.. kernel-doc:: include/net/page_pool.h
+.. kernel-doc:: include/net/page_pool/helpers.h
    :identifiers: page_pool_put_page page_pool_put_full_page
                 page_pool_recycle_direct page_pool_dev_alloc_pages
                 page_pool_get_dma_addr page_pool_get_dma_dir
@@ -122,7 +122,7 @@ page_pool_stats allocated by the caller.
 The API will fill in the provided struct page_pool_stats with
 statistics about the page_pool.
 
-.. kernel-doc:: include/net/page_pool.h
+.. kernel-doc:: include/net/page_pool/types.h
    :identifiers: struct page_pool_recycle_stats
                 struct page_pool_alloc_stats
                 struct page_pool_stats
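
As an aside, the pattern the documentation describes boils down to the
following sketch (foo_priv and the ethtool hookup are hypothetical;
page_pool_get_stats() and page_pool_ethtool_stats_get() are the real API):

#ifdef CONFIG_PAGE_POOL_STATS
static u64 *foo_fill_pp_stats(struct foo_priv *priv, u64 *data)
{
	struct page_pool_stats stats = { };

	/* Fills the caller-allocated struct from the pool's counters */
	if (page_pool_get_stats(priv->page_pool, &stats))
		data = page_pool_ethtool_stats_get(data, &stats);
	return data;
}
#endif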
index 5e2bb1059ab67b4230be886cd2e6d134ea4bde37..08bcf3a7c482dc01278fed62635428e3a2299423 100644 (file)
@@ -16020,7 +16020,7 @@ M:      Ilias Apalodimas <[email protected]>
 L:     [email protected]
 S:     Supported
 F:     Documentation/networking/page_pool.rst
-F:     include/net/page_pool.h
+F:     include/net/page_pool/
 F:     include/trace/events/page_pool.h
 F:     net/core/page_pool.c
 
index 6a643aae78028887e2fb68572588aa4d06a31e6d..eb168ca983b753663ee99fbda1db6639a93f2fa7 100644 (file)
@@ -54,7 +54,7 @@
 #include <net/pkt_cls.h>
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
 #include <linux/align.h>
 #include <net/netdev_queues.h>
 
index 2ce46d7affe4a09529e9febf37b0dc439d92f6c2..96f5ca778c67d609ffa3530fa5508704e0b8e98f 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/bpf.h>
 #include <linux/bpf_trace.h>
 #include <linux/filter.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
 #include "bnxt_hsi.h"
 #include "bnxt.h"
 #include "bnxt_xdp.h"
index 079f9f6ae21aebe9689d40a90a6cef57d38c4ed5..f61bd89734c588e7c5314859f7245c7e081bba87 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/iopoll.h>
 #include <linux/bpf.h>
 #include <linux/bpf_trace.h>
+#include <net/page_pool/helpers.h>
 #include <net/xdp_sock_drv.h>
 
 #define TSNEP_RX_OFFSET (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
index 43f14cec91e9335488f9c2bcf80e62181f081f1f..3bd0bf03aedbf0cca160676d527e2497c737adc3 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <net/ip.h>
+#include <net/page_pool/helpers.h>
 #include <net/selftests.h>
 #include <net/tso.h>
 #include <linux/tcp.h>
index 9f6890059666e50bde32445c801369417c882796..e5e37a33fd819777e16222f6baa1c6d584551d3b 100644 (file)
@@ -18,6 +18,7 @@
 #include <net/gre.h>
 #include <net/gro.h>
 #include <net/ip6_checksum.h>
+#include <net/page_pool/helpers.h>
 #include <net/pkt_cls.h>
 #include <net/pkt_sched.h>
 #include <net/tcp.h>
index 88af34bbee34bad0be2aa1e28678fb00c37c35a3..acd756b0c7c9a4134a0d7c47a50b8112dc881ee1 100644 (file)
@@ -6,7 +6,7 @@
 
 #include <linux/dim.h>
 #include <linux/if_vlan.h>
-#include <net/page_pool.h>
+#include <net/page_pool/types.h>
 #include <asm/barrier.h>
 
 #include "hnae3.h"
index acf4f6ba73a6f97b74b3c2a4e6b2581146d0f651..d483b8c00ec0e2dacfd5ac616b3c282e09d4fd37 100644 (file)
@@ -37,7 +37,7 @@
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/tso.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
 #include <net/pkt_sched.h>
 #include <linux/bpf_trace.h>
 
index 11e603686a276661ebbf6e3c1ea7545253213eab..e809f91c08fb9daac8b7c4a89bd279c671036fb7 100644 (file)
@@ -16,7 +16,7 @@
 #include <linux/phy.h>
 #include <linux/phylink.h>
 #include <net/flow_offload.h>
-#include <net/page_pool.h>
+#include <net/page_pool/types.h>
 #include <linux/bpf.h>
 #include <net/xdp.h>
 
index 9e1b596c8f088846d4b27e9d58f182f0bfdcef93..eb74ccddb4409762cbc01cf24b9bb53a95dd250d 100644 (file)
@@ -35,6 +35,7 @@
 #include <uapi/linux/ppp_defs.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
+#include <net/page_pool/helpers.h>
 #include <net/tso.h>
 #include <linux/bpf_trace.h>
 
index 8cdd92dd97628f7f81cd9f7cee1d9132aa61181e..8336cea16aff01661f424fd0535d18692ce9d9d1 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <linux/interrupt.h>
 #include <linux/pci.h>
+#include <net/page_pool/helpers.h>
 #include <net/tso.h>
 #include <linux/bitfield.h>
 
index 61f62a6ec6627cd006e34df8ac37feedc4dd8853..70b9065f7d10188180b8aa16e2b37c85d1ff7176 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/bpf.h>
 #include <linux/bpf_trace.h>
 #include <linux/bitfield.h>
+#include <net/page_pool/types.h>
 
 #include "otx2_reg.h"
 #include "otx2_common.h"
index 1b89f800f6dff7de04addde31648f50305e5f6a5..fe05c90202699ff5e8af1adcc8461ad83dea94a6 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/bitfield.h>
 #include <net/dsa.h>
 #include <net/dst_metadata.h>
+#include <net/page_pool/helpers.h>
 
 #include "mtk_eth_soc.h"
 #include "mtk_wed.h"
index 80d17729e557846b3ff027427757992e36e199e0..4a2470fbad2cf012579178521935b71ddcb055b7 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/rhashtable.h>
 #include <linux/dim.h>
 #include <linux/bitfield.h>
-#include <net/page_pool.h>
+#include <net/page_pool/types.h>
 #include <linux/bpf_trace.h>
 #include "mtk_ppe.h"
 
index 5ce28ff7685fcedd66994c3cc98340eda7eef513..e097f336e1c4a0ff1543bab027a4700aef8a6be2 100644 (file)
@@ -6,6 +6,7 @@
 #include "en/port.h"
 #include "en_accel/en_accel.h"
 #include "en_accel/ipsec.h"
+#include <net/page_pool/types.h>
 #include <net/xdp_sock_drv.h>
 
 static u8 mlx5e_mpwrq_min_page_shift(struct mlx5_core_dev *mdev)
index 201ac7dd338f06181f778000a3e43e363af6484d..698647cc8c0f92dab81697880df065019b8b1db8 100644 (file)
@@ -1,7 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 /* Copyright (c) 2020 Mellanox Technologies */
 
-#include <net/page_pool.h>
 #include "en/txrx.h"
 #include "en/params.h"
 #include "en/trap.h"
index 40589cebb773002739746987f3d1089d76de2ffb..12f56d0db0af2f904866b75d159a00a05a9e2f7a 100644 (file)
@@ -35,6 +35,7 @@
 #include "en/xdp.h"
 #include "en/params.h"
 #include <linux/bitfield.h>
+#include <net/page_pool/helpers.h>
 
 int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk)
 {
index 1c820119e438f0cd119f891080e16628313971cb..c8ec6467d4d16d80e1ec370012d7528d6051cb06 100644 (file)
@@ -38,7 +38,7 @@
 #include <linux/debugfs.h>
 #include <linux/if_bridge.h>
 #include <linux/filter.h>
-#include <net/page_pool.h>
+#include <net/page_pool/types.h>
 #include <net/pkt_sched.h>
 #include <net/xdp_sock_drv.h>
 #include "eswitch.h"
index f7bb5f4aaaca0170b6d94536d5f53cfbe1539a39..3fd11b0761e09e4af026bd82a44ee3f812261fd0 100644 (file)
@@ -36,7 +36,7 @@
 #include <linux/bitmap.h>
 #include <linux/filter.h>
 #include <net/ip6_checksum.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
 #include <net/inet_ecn.h>
 #include <net/gro.h>
 #include <net/udp.h>
index 4d77055abd4be3659672663e97c6b4269435119c..07b84d668fcc02ff046f2076bfa7faccb796e228 100644 (file)
@@ -38,7 +38,7 @@
 #include "en/port.h"
 
 #ifdef CONFIG_PAGE_POOL_STATS
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
 #endif
 
 static unsigned int stats_grps_num(struct mlx5e_priv *priv)
index bd72fbc2220f3010afd8b90f3704e261b9d0a98f..3960534ac2ad812b7bb41fa359b9d20d6e5197e3 100644 (file)
@@ -2,6 +2,7 @@
 
 #include <linux/bpf.h>
 #include <linux/filter.h>
+#include <net/page_pool/helpers.h>
 
 #include "lan966x_main.h"
 
index aebc9154693af14a9d8336cc33a5c2740a2f7abb..caa9e0533c96b3c31d956ee3f01eb7e59bab591c 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/phy.h>
 #include <linux/phylink.h>
 #include <linux/ptp_clock_kernel.h>
-#include <net/page_pool.h>
+#include <net/page_pool/types.h>
 #include <net/pkt_cls.h>
 #include <net/pkt_sched.h>
 #include <net/switchdev.h>
index a08023c57e25ef9f7082c82857e8e198b55c555f..31e2f2c74e15a96cefec375ae6fe9cf023f56a38 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <net/checksum.h>
 #include <net/ip6_checksum.h>
+#include <net/page_pool/helpers.h>
 #include <net/xdp.h>
 
 #include <net/mana/mana.h>
index 0dcd6a568b061242f510dd460dde5798895ab1a5..f358ea003193698cb48ed0362be9e9f96e90d6db 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/bpf_trace.h>
 
 #include <net/tcp.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
 #include <net/ip6_checksum.h>
 
 #define NETSEC_REG_SOFT_RST                    0x104
index a6d034968654d2f492661d585b0947ed42dbf4e5..3401e888a9f6860b835a115a80102d01ea44ebff 100644 (file)
@@ -21,7 +21,7 @@
 #include <linux/ptp_clock_kernel.h>
 #include <linux/net_tstamp.h>
 #include <linux/reset.h>
-#include <net/page_pool.h>
+#include <net/page_pool/types.h>
 #include <net/xdp.h>
 #include <uapi/linux/bpf.h>
 
index 99aa5360b3ff91bb3816a1f8d7431c07e2f7ad3e..fcab363d8dfa245269d6c3729951aa7fc4de37e1 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/phylink.h>
 #include <linux/udp.h>
 #include <linux/bpf_trace.h>
+#include <net/page_pool/helpers.h>
 #include <net/pkt_cls.h>
 #include <net/xdp_sock_drv.h>
 #include "stmmac_ptp.h"
index f9cd566d1c9b588e5cd547ca4109567dc50114e4..ca4d4548f85e300721a7608066b778f2a2335aca 100644 (file)
@@ -31,7 +31,7 @@
 #include <linux/if_vlan.h>
 #include <linux/kmemleak.h>
 #include <linux/sys_soc.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
 #include <linux/bpf.h>
 #include <linux/bpf_trace.h>
 
index c61e4e44a78f06ddbd28e94ce65e2af410291fe7..0e4f526b17532e94d0634ad8611371934dad7717 100644 (file)
@@ -30,7 +30,7 @@
 #include <linux/sys_soc.h>
 
 #include <net/switchdev.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
 #include <net/pkt_cls.h>
 #include <net/devlink.h>
 
index ae52cdbcf8cc55f1846b154ffc833080d5005617..0ec85635dfd60a2f169f2c6aab5222842f2d3ae5 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/skbuff.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
 #include <net/pkt_cls.h>
 #include <net/pkt_sched.h>
 
index 2c3f08be8c37432b569834152064c56d2a138a5c..e04d4a5eed7ba0c06ecb7c4040e3fb4f837c2134 100644 (file)
@@ -3,7 +3,7 @@
 
 #include <linux/etherdevice.h>
 #include <net/ip6_checksum.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
 #include <net/inet_ecn.h>
 #include <linux/iopoll.h>
 #include <linux/sctp.h>
index 614f3e3efab0950fa4bbe1ee897ba008fc5e3369..953f6d8f8db04d5341fb7ac5de27ffa45e761fcf 100644 (file)
@@ -26,7 +26,7 @@
 #include <linux/ptr_ring.h>
 #include <linux/bpf_trace.h>
 #include <linux/net_tstamp.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
 
 #define DRV_NAME       "veth"
 #define DRV_VERSION    "1.0"
index c0ff36a98bed3e36a3462483d410d3815236c21d..d158320bc15dbea865cdd891655eb608d999c2c0 100644 (file)
@@ -4,7 +4,6 @@
  */
 #include <linux/sched.h>
 #include <linux/of.h>
-#include <net/page_pool.h>
 #include "mt76.h"
 
 #define CHAN2G(_idx, _freq) {                  \
index 878087257ea7e45c56c42d8ea20bb77a1aba193e..e8757865a3d068f1a4b7bd88d85dff8827a2c1aa 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/average.h>
 #include <linux/soc/mediatek/mtk_wed.h>
 #include <net/mac80211.h>
+#include <net/page_pool/helpers.h>
 #include "util.h"
 #include "testmode.h"
 
index 47d54d8ea59d17a94a9fa08d50d06eee45743bfe..ad29f370034e4f080ca7350e8114093446f798e6 100644 (file)
@@ -45,7 +45,7 @@
 #include <linux/slab.h>
 #include <net/ip.h>
 #include <linux/bpf.h>
-#include <net/page_pool.h>
+#include <net/page_pool/types.h>
 #include <linux/bpf_trace.h>
 
 #include <xen/xen.h>
index 16a49ba534e4aa3fbb6c4ebe29aa8e9ef7bb4c76..888e3d7e74c1facd2e164f94385e482cc11c7b40 100644 (file)
@@ -32,7 +32,7 @@
 #include <linux/if_packet.h>
 #include <linux/llist.h>
 #include <net/flow.h>
-#include <net/page_pool.h>
+#include <net/page_pool/types.h>
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
 #include <linux/netfilter/nf_conntrack_common.h>
 #endif
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
deleted file mode 100644 (file)
index 73d4f78..0000000
+++ /dev/null
@@ -1,470 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * page_pool.h
- *     Author: Jesper Dangaard Brouer <[email protected]>
- *     Copyright (C) 2016 Red Hat, Inc.
- */
-
-/**
- * DOC: page_pool allocator
- *
- * This page_pool allocator is optimized for the XDP mode that
- * uses one-frame-per-page, but have fallbacks that act like the
- * regular page allocator APIs.
- *
- * Basic use involves replacing alloc_pages() calls with the
- * page_pool_alloc_pages() call.  Drivers should likely use
- * page_pool_dev_alloc_pages() replacing dev_alloc_pages().
- *
- * The API keeps track of in-flight pages, in order to let the API user
- * know when it is safe to deallocate the page_pool object.  Thus, API
- * users must call page_pool_put_page() where appropriate and only attach
- * the page to page_pool-aware objects, like skbs marked for recycling.
- *
- * An API user must only call page_pool_put_page() once on a page, as it
- * will either recycle the page, or in case of elevated refcnt, it
- * will release the DMA mapping and in-flight state accounting.  We
- * hope to lift this requirement in the future.
- */
-#ifndef _NET_PAGE_POOL_H
-#define _NET_PAGE_POOL_H
-
-#include <linux/mm.h> /* Needed by ptr_ring */
-#include <linux/ptr_ring.h>
-#include <linux/dma-direction.h>
-
-#define PP_FLAG_DMA_MAP                BIT(0) /* Should page_pool do the DMA
-                                       * map/unmap
-                                       */
-#define PP_FLAG_DMA_SYNC_DEV   BIT(1) /* If set all pages that the driver gets
-                                       * from page_pool will be
-                                       * DMA-synced-for-device according to
-                                       * the length provided by the device
-                                       * driver.
-                                       * Please note DMA-sync-for-CPU is still
-                                       * device driver responsibility
-                                       */
-#define PP_FLAG_PAGE_FRAG      BIT(2) /* for page frag feature */
-#define PP_FLAG_ALL            (PP_FLAG_DMA_MAP |\
-                                PP_FLAG_DMA_SYNC_DEV |\
-                                PP_FLAG_PAGE_FRAG)
-
-/*
- * Fast allocation side cache array/stack
- *
- * The cache size and refill watermark is related to the network
- * use-case.  The NAPI budget is 64 packets.  After a NAPI poll the RX
- * ring is usually refilled and the max consumed elements will be 64,
- * thus a natural max size of objects needed in the cache.
- *
- * Keeping room for more objects is due to the XDP_DROP use-case.  As
- * XDP_DROP allows the opportunity to recycle objects directly into
- * this array, as it shares the same softirq/NAPI protection.  If
- * cache is already full (or partly full) then the XDP_DROP recycles
- * would have to take a slower code path.
- */
-#define PP_ALLOC_CACHE_SIZE    128
-#define PP_ALLOC_CACHE_REFILL  64
-struct pp_alloc_cache {
-       u32 count;
-       struct page *cache[PP_ALLOC_CACHE_SIZE];
-};
-
-/**
- * struct page_pool_params - page pool parameters
- * @flags:     PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_PAGE_FRAG
- * @order:     2^order pages on allocation
- * @pool_size: size of the ptr_ring
- * @nid:       NUMA node id to allocate pages from
- * @dev:       device, for DMA pre-mapping purposes
- * @napi:      NAPI which is the sole consumer of pages, otherwise NULL
- * @dma_dir:   DMA mapping direction
- * @max_len:   max DMA sync memory size for PP_FLAG_DMA_SYNC_DEV
- * @offset:    DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
- */
-struct page_pool_params {
-       unsigned int    flags;
-       unsigned int    order;
-       unsigned int    pool_size;
-       int             nid;
-       struct device   *dev;
-       struct napi_struct *napi;
-       enum dma_data_direction dma_dir;
-       unsigned int    max_len;
-       unsigned int    offset;
-/* private: used by test code only */
-       void (*init_callback)(struct page *page, void *arg);
-       void *init_arg;
-};
-
-#ifdef CONFIG_PAGE_POOL_STATS
-/**
- * struct page_pool_alloc_stats - allocation statistics
- * @fast:      successful fast path allocations
- * @slow:      slow path order-0 allocations
- * @slow_high_order: slow path high order allocations
- * @empty:     ptr ring is empty, so a slow path allocation was forced
- * @refill:    an allocation which triggered a refill of the cache
- * @waive:     pages obtained from the ptr ring that cannot be added to
- *             the cache due to a NUMA mismatch
- */
-struct page_pool_alloc_stats {
-       u64 fast;
-       u64 slow;
-       u64 slow_high_order;
-       u64 empty;
-       u64 refill;
-       u64 waive;
-};
-
-/**
- * struct page_pool_recycle_stats - recycling (freeing) statistics
- * @cached:    recycling placed page in the page pool cache
- * @cache_full:        page pool cache was full
- * @ring:      page placed into the ptr ring
- * @ring_full: page released from page pool because the ptr ring was full
- * @released_refcnt:   page released (and not recycled) because refcnt > 1
- */
-struct page_pool_recycle_stats {
-       u64 cached;
-       u64 cache_full;
-       u64 ring;
-       u64 ring_full;
-       u64 released_refcnt;
-};
-
-/**
- * struct page_pool_stats - combined page pool use statistics
- * @alloc_stats:       see struct page_pool_alloc_stats
- * @recycle_stats:     see struct page_pool_recycle_stats
- *
- * Wrapper struct for combining page pool stats with different storage
- * requirements.
- */
-struct page_pool_stats {
-       struct page_pool_alloc_stats alloc_stats;
-       struct page_pool_recycle_stats recycle_stats;
-};
-
-int page_pool_ethtool_stats_get_count(void);
-u8 *page_pool_ethtool_stats_get_strings(u8 *data);
-u64 *page_pool_ethtool_stats_get(u64 *data, void *stats);
-
-/*
- * Drivers that wish to harvest page pool stats and report them to users
- * (perhaps via ethtool, debugfs, or another mechanism) can allocate a
- * struct page_pool_stats and call page_pool_get_stats() to get stats for the specified pool.
- */
-bool page_pool_get_stats(struct page_pool *pool,
-                        struct page_pool_stats *stats);
-#else
-
-static inline int page_pool_ethtool_stats_get_count(void)
-{
-       return 0;
-}
-
-static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data)
-{
-       return data;
-}
-
-static inline u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
-{
-       return data;
-}
-
-#endif
-
-struct page_pool {
-       struct page_pool_params p;
-
-       struct delayed_work release_dw;
-       void (*disconnect)(void *);
-       unsigned long defer_start;
-       unsigned long defer_warn;
-
-       u32 pages_state_hold_cnt;
-       unsigned int frag_offset;
-       struct page *frag_page;
-       long frag_users;
-
-#ifdef CONFIG_PAGE_POOL_STATS
-       /* these stats are incremented while in softirq context */
-       struct page_pool_alloc_stats alloc_stats;
-#endif
-       u32 xdp_mem_id;
-
-       /*
-        * Data structure for allocation side
-        *
-        * The drivers' allocation side usually already performs some kind
-        * of resource protection.  Piggyback on this protection, and
-        * require the driver to protect the allocation side.
-        *
-        * For NIC drivers this means allocating a page_pool per
-        * RX-queue, as the RX-queue is already protected by
-        * softirq/BH scheduling, and napi_schedule() guarantees
-        * that a single napi_struct will only be scheduled
-        * on a single CPU (see napi_schedule()).
-        */
-       struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;
-
-       /* Data structure for storing recycled pages.
-        *
-        * Returning/freeing pages is more complicated synchronization
-        * wise, because frees can happen on remote CPUs, with no
-        * association with allocation resource.
-        *
-        * Use ptr_ring, as it separates consumer and producer
-        * efficiently, in a way that doesn't bounce cache-lines.
-        *
-        * TODO: Implement bulk return pages into this structure.
-        */
-       struct ptr_ring ring;
-
-#ifdef CONFIG_PAGE_POOL_STATS
-       /* recycle stats are per-cpu to avoid locking */
-       struct page_pool_recycle_stats __percpu *recycle_stats;
-#endif
-       atomic_t pages_state_release_cnt;
-
-       /* A page_pool is strictly tied to a single RX-queue being
-        * protected by NAPI, due to above pp_alloc_cache. This
-        * refcnt serves to simplify the drivers' error handling.
-        */
-       refcount_t user_cnt;
-
-       u64 destroy_cnt;
-};
-
-struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
-
-/**
- * page_pool_dev_alloc_pages() - allocate a page.
- * @pool:      pool from which to allocate
- *
- * Get a page from the page allocator or page_pool caches.
- */
-static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
-{
-       gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
-
-       return page_pool_alloc_pages(pool, gfp);
-}
-
-struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
-                                 unsigned int size, gfp_t gfp);
-
-static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
-                                                   unsigned int *offset,
-                                                   unsigned int size)
-{
-       gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
-
-       return page_pool_alloc_frag(pool, offset, size, gfp);
-}
-
-/**
- * page_pool_get_dma_dir() - Retrieve the stored DMA direction.
- * @pool:      pool from which page was allocated
- *
- * Get the stored dma direction. A driver might decide to store this locally
- * and avoid the extra cache line from page_pool to determine the direction.
- */
-static
-inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
-{
-       return pool->p.dma_dir;
-}
-
-bool page_pool_return_skb_page(struct page *page, bool napi_safe);
-
-struct page_pool *page_pool_create(const struct page_pool_params *params);
-
-struct xdp_mem_info;
-
-#ifdef CONFIG_PAGE_POOL
-void page_pool_unlink_napi(struct page_pool *pool);
-void page_pool_destroy(struct page_pool *pool);
-void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
-                          struct xdp_mem_info *mem);
-void page_pool_put_page_bulk(struct page_pool *pool, void **data,
-                            int count);
-#else
-static inline void page_pool_unlink_napi(struct page_pool *pool)
-{
-}
-
-static inline void page_pool_destroy(struct page_pool *pool)
-{
-}
-
-static inline void page_pool_use_xdp_mem(struct page_pool *pool,
-                                        void (*disconnect)(void *),
-                                        struct xdp_mem_info *mem)
-{
-}
-
-static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
-                                          int count)
-{
-}
-#endif
-
-void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
-                                 unsigned int dma_sync_size,
-                                 bool allow_direct);
-
-/* pp_frag_count represents the number of writers who can update the page
- * either by updating skb->data or via DMA mappings for the device.
- * We can't rely on the page refcnt for that as we don't know who might be
- * holding page references and we can't reliably destroy or sync DMA mappings
- * of the fragments.
- *
- * When pp_frag_count reaches 0 we can either recycle the page if the page
- * refcnt is 1 or return it back to the memory allocator and destroy any
- * mappings we have.
- */
-static inline void page_pool_fragment_page(struct page *page, long nr)
-{
-       atomic_long_set(&page->pp_frag_count, nr);
-}
-
-static inline long page_pool_defrag_page(struct page *page, long nr)
-{
-       long ret;
-
-       /* If nr == pp_frag_count then we have cleared all remaining
-        * references to the page. No need to actually overwrite it, instead
-        * we can leave this to be overwritten by the calling function.
-        *
-        * The main advantage to doing this is that an atomic_read is
-        * generally a much cheaper operation than an atomic update,
-        * especially when dealing with a page that may be partitioned
-        * into only 2 or 3 pieces.
-        */
-       if (atomic_long_read(&page->pp_frag_count) == nr)
-               return 0;
-
-       ret = atomic_long_sub_return(nr, &page->pp_frag_count);
-       WARN_ON(ret < 0);
-       return ret;
-}
-
-static inline bool page_pool_is_last_frag(struct page_pool *pool,
-                                         struct page *page)
-{
-       /* If fragments aren't enabled or count is 0 we were the last user */
-       return !(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
-              (page_pool_defrag_page(page, 1) == 0);
-}
-
-/**
- * page_pool_put_page() - release a reference to a page pool page
- * @pool:      pool from which page was allocated
- * @page:      page to release a reference on
- * @dma_sync_size: how much of the page may have been touched by the device
- * @allow_direct: released by the consumer, allow lockless caching
- *
- * The outcome of this depends on the page refcnt. If the driver bumps
- * the refcnt > 1 this will unmap the page. If the page refcnt is 1
- * the allocator owns the page and will try to recycle it in one of the pool
- * caches. If PP_FLAG_DMA_SYNC_DEV is set, the page will be synced for_device
- * using dma_sync_single_range_for_device().
- */
-static inline void page_pool_put_page(struct page_pool *pool,
-                                     struct page *page,
-                                     unsigned int dma_sync_size,
-                                     bool allow_direct)
-{
-       /* When page_pool isn't compiled-in, net/core/xdp.c doesn't
-        * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
-        */
-#ifdef CONFIG_PAGE_POOL
-       if (!page_pool_is_last_frag(pool, page))
-               return;
-
-       page_pool_put_defragged_page(pool, page, dma_sync_size, allow_direct);
-#endif
-}
-
-/**
- * page_pool_put_full_page() - release a reference on a page pool page
- * @pool:      pool from which page was allocated
- * @page:      page to release a reference on
- * @allow_direct: released by the consumer, allow lockless caching
- *
- * Similar to page_pool_put_page(), but will DMA sync the entire memory area
- * as configured in &page_pool_params.max_len.
- */
-static inline void page_pool_put_full_page(struct page_pool *pool,
-                                          struct page *page, bool allow_direct)
-{
-       page_pool_put_page(pool, page, -1, allow_direct);
-}
-
-/**
- * page_pool_recycle_direct() - release a reference on a page pool page
- * @pool:      pool from which page was allocated
- * @page:      page to release a reference on
- *
- * Similar to page_pool_put_full_page() but caller must guarantee safe context
- * (e.g. NAPI), since it will recycle the page directly into the pool fast cache.
- */
-static inline void page_pool_recycle_direct(struct page_pool *pool,
-                                           struct page *page)
-{
-       page_pool_put_full_page(pool, page, true);
-}
-
-#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT        \
-               (sizeof(dma_addr_t) > sizeof(unsigned long))
-
-/**
- * page_pool_get_dma_addr() - Retrieve the stored DMA address.
- * @page:      page allocated from a page pool
- *
- * Fetch the DMA address of the page. The page pool to which the page belongs
- * must have been created with PP_FLAG_DMA_MAP.
- */
-static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
-{
-       dma_addr_t ret = page->dma_addr;
-
-       if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
-               ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16;
-
-       return ret;
-}
-
-static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
-{
-       page->dma_addr = addr;
-       if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
-               page->dma_addr_upper = upper_32_bits(addr);
-}
-
-static inline bool is_page_pool_compiled_in(void)
-{
-#ifdef CONFIG_PAGE_POOL
-       return true;
-#else
-       return false;
-#endif
-}
-
-static inline bool page_pool_put(struct page_pool *pool)
-{
-       return refcount_dec_and_test(&pool->user_cnt);
-}
-
-/* Caller must provide appropriate safe context, e.g. NAPI. */
-void page_pool_update_nid(struct page_pool *pool, int new_nid);
-static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
-{
-       if (unlikely(pool->p.nid != new_nid))
-               page_pool_update_nid(pool, new_nid);
-}
-
-#endif /* _NET_PAGE_POOL_H */
diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h
new file mode 100644 (file)
index 0000000..78df918
--- /dev/null
@@ -0,0 +1,238 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * page_pool/helpers.h
+ *     Author: Jesper Dangaard Brouer <[email protected]>
+ *     Copyright (C) 2016 Red Hat, Inc.
+ */
+
+/**
+ * DOC: page_pool allocator
+ *
+ * This page_pool allocator is optimized for the XDP mode that
+ * uses one-frame-per-page, but has fallbacks that act like the
+ * regular page allocator APIs.
+ *
+ * Basic use involves replacing alloc_pages() calls with the
+ * page_pool_alloc_pages() call.  Drivers should likely use
+ * page_pool_dev_alloc_pages() replacing dev_alloc_pages().
+ *
+ * The API keeps track of in-flight pages, in order to let the API user
+ * know when it is safe to deallocate the page_pool object.  Thus, API
+ * users must call page_pool_put_page() where appropriate and only attach
+ * the page to page_pool-aware objects, like skbs marked for recycling.
+ *
+ * An API user must only call page_pool_put_page() once on a page, as it
+ * will either recycle the page, or in case of elevated refcnt, it
+ * will release the DMA mapping and in-flight state accounting.  We
+ * hope to lift this requirement in the future.
+ */
+#ifndef _NET_PAGE_POOL_HELPERS_H
+#define _NET_PAGE_POOL_HELPERS_H
+
+#include <net/page_pool/types.h>
+
+#ifdef CONFIG_PAGE_POOL_STATS
+int page_pool_ethtool_stats_get_count(void);
+u8 *page_pool_ethtool_stats_get_strings(u8 *data);
+u64 *page_pool_ethtool_stats_get(u64 *data, void *stats);
+
+/*
+ * Drivers that wish to harvest page pool stats and report them to users
+ * (perhaps via ethtool, debugfs, or another mechanism) can allocate a
+ * struct page_pool_stats and call page_pool_get_stats() to get stats for the specified pool.
+ */
+bool page_pool_get_stats(struct page_pool *pool,
+                        struct page_pool_stats *stats);
+#else
+static inline int page_pool_ethtool_stats_get_count(void)
+{
+       return 0;
+}
+
+static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data)
+{
+       return data;
+}
+
+static inline u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
+{
+       return data;
+}
+#endif
+
+/**
+ * page_pool_dev_alloc_pages() - allocate a page.
+ * @pool:      pool from which to allocate
+ *
+ * Get a page from the page allocator or page_pool caches.
+ */
+static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
+{
+       gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+       return page_pool_alloc_pages(pool, gfp);
+}
+
+static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
+                                                   unsigned int *offset,
+                                                   unsigned int size)
+{
+       gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+       return page_pool_alloc_frag(pool, offset, size, gfp);
+}
+
+/**
+ * page_pool_get_dma_dir() - Retrieve the stored DMA direction.
+ * @pool:      pool from which page was allocated
+ *
+ * Get the stored dma direction. A driver might decide to store this locally
+ * and avoid the extra cache line from page_pool to determine the direction.
+ */
+static
+inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
+{
+       return pool->p.dma_dir;
+}
+
+/* pp_frag_count represents the number of writers who can update the page
+ * either by updating skb->data or via DMA mappings for the device.
+ * We can't rely on the page refcnt for that as we don't know who might be
+ * holding page references and we can't reliably destroy or sync DMA mappings
+ * of the fragments.
+ *
+ * When pp_frag_count reaches 0 we can either recycle the page if the page
+ * refcnt is 1 or return it back to the memory allocator and destroy any
+ * mappings we have.
+ */
+static inline void page_pool_fragment_page(struct page *page, long nr)
+{
+       atomic_long_set(&page->pp_frag_count, nr);
+}
+
+static inline long page_pool_defrag_page(struct page *page, long nr)
+{
+       long ret;
+
+       /* If nr == pp_frag_count then we have cleared all remaining
+        * references to the page. No need to actually overwrite it, instead
+        * we can leave this to be overwritten by the calling function.
+        *
+        * The main advantage to doing this is that an atomic_read is
+        * generally a much cheaper operation than an atomic update,
+        * especially when dealing with a page that may be partitioned
+        * into only 2 or 3 pieces.
+        */
+       if (atomic_long_read(&page->pp_frag_count) == nr)
+               return 0;
+
+       ret = atomic_long_sub_return(nr, &page->pp_frag_count);
+       WARN_ON(ret < 0);
+       return ret;
+}
+
+static inline bool page_pool_is_last_frag(struct page_pool *pool,
+                                         struct page *page)
+{
+       /* If fragments aren't enabled or count is 0 we were the last user */
+       return !(pool->p.flags & PP_FLAG_PAGE_FRAG) ||
+              (page_pool_defrag_page(page, 1) == 0);
+}
+
+/**
+ * page_pool_put_page() - release a reference to a page pool page
+ * @pool:      pool from which page was allocated
+ * @page:      page to release a reference on
+ * @dma_sync_size: how much of the page may have been touched by the device
+ * @allow_direct: released by the consumer, allow lockless caching
+ *
+ * The outcome of this depends on the page refcnt. If the driver bumps
+ * the refcnt > 1 this will unmap the page. If the page refcnt is 1
+ * the allocator owns the page and will try to recycle it in one of the pool
+ * caches. If PP_FLAG_DMA_SYNC_DEV is set, the page will be synced for_device
+ * using dma_sync_single_range_for_device().
+ */
+static inline void page_pool_put_page(struct page_pool *pool,
+                                     struct page *page,
+                                     unsigned int dma_sync_size,
+                                     bool allow_direct)
+{
+       /* When page_pool isn't compiled-in, net/core/xdp.c doesn't
+        * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
+        */
+#ifdef CONFIG_PAGE_POOL
+       if (!page_pool_is_last_frag(pool, page))
+               return;
+
+       page_pool_put_defragged_page(pool, page, dma_sync_size, allow_direct);
+#endif
+}
+
+/**
+ * page_pool_put_full_page() - release a reference on a page pool page
+ * @pool:      pool from which page was allocated
+ * @page:      page to release a reference on
+ * @allow_direct: released by the consumer, allow lockless caching
+ *
+ * Similar to page_pool_put_page(), but will DMA sync the entire memory area
+ * as configured in &page_pool_params.max_len.
+ */
+static inline void page_pool_put_full_page(struct page_pool *pool,
+                                          struct page *page, bool allow_direct)
+{
+       page_pool_put_page(pool, page, -1, allow_direct);
+}
+
+/**
+ * page_pool_recycle_direct() - release a reference on a page pool page
+ * @pool:      pool from which page was allocated
+ * @page:      page to release a reference on
+ *
+ * Similar to page_pool_put_full_page() but caller must guarantee safe context
+ * (e.g. NAPI), since it will recycle the page directly into the pool fast cache.
+ */
+static inline void page_pool_recycle_direct(struct page_pool *pool,
+                                           struct page *page)
+{
+       page_pool_put_full_page(pool, page, true);
+}
+
+#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT        \
+               (sizeof(dma_addr_t) > sizeof(unsigned long))
+
+/**
+ * page_pool_get_dma_addr() - Retrieve the stored DMA address.
+ * @page:      page allocated from a page pool
+ *
+ * Fetch the DMA address of the page. The page pool to which the page belongs
+ * must have been created with PP_FLAG_DMA_MAP.
+ */
+static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
+{
+       dma_addr_t ret = page->dma_addr;
+
+       if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
+               ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16;
+
+       return ret;
+}
+
+static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
+{
+       page->dma_addr = addr;
+       if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
+               page->dma_addr_upper = upper_32_bits(addr);
+}
+
+static inline bool page_pool_put(struct page_pool *pool)
+{
+       return refcount_dec_and_test(&pool->user_cnt);
+}
+
+static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
+{
+       if (unlikely(pool->p.nid != new_nid))
+               page_pool_update_nid(pool, new_nid);
+}
+
+#endif /* _NET_PAGE_POOL_HELPERS_H */
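
Taken together, the helpers above cover the basic driver pattern from the
DOC comment; a minimal sketch, assuming a pool created with PP_FLAG_DMA_MAP
(the foo_* names and foo_post_rx_buffer() are hypothetical):

/* NAPI poll context: allocate an RX buffer from the pool */
static int foo_refill_rx(struct foo_rx_ring *rx)
{
	struct page *page;

	page = page_pool_dev_alloc_pages(rx->pool); /* GFP_ATOMIC inside */
	if (!page)
		return -ENOMEM;

	/* DMA mapping was already done by the pool (PP_FLAG_DMA_MAP) */
	foo_post_rx_buffer(rx, page_pool_get_dma_addr(page));
	return 0;
}

/* XDP_DROP path, still in NAPI context: recycle into the fast cache */
static void foo_drop_rx(struct foo_rx_ring *rx, struct page *page)
{
	page_pool_recycle_direct(rx->pool, page);
}

And a sketch of the pp_frag_count scheme documented above, for a pool
created with PP_FLAG_PAGE_FRAG (again hypothetical driver code):

/* Hand out two references to the same page */
page_pool_fragment_page(page, 2);

/* Each buffer completion then drops one fragment reference; the put
 * that brings pp_frag_count to zero lets the pool recycle the page.
 */
page_pool_put_full_page(pool, page, false);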
diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h
new file mode 100644 (file)
index 0000000..9ac3919
--- /dev/null
@@ -0,0 +1,238 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _NET_PAGE_POOL_TYPES_H
+#define _NET_PAGE_POOL_TYPES_H
+
+#include <linux/dma-direction.h>
+#include <linux/ptr_ring.h>
+
+#define PP_FLAG_DMA_MAP                BIT(0) /* Should page_pool do the DMA
+                                       * map/unmap
+                                       */
+#define PP_FLAG_DMA_SYNC_DEV   BIT(1) /* If set all pages that the driver gets
+                                       * from page_pool will be
+                                       * DMA-synced-for-device according to
+                                       * the length provided by the device
+                                       * driver.
+                                       * Please note DMA-sync-for-CPU is still
+                                       * device driver responsibility
+                                       */
+#define PP_FLAG_PAGE_FRAG      BIT(2) /* for page frag feature */
+#define PP_FLAG_ALL            (PP_FLAG_DMA_MAP |\
+                                PP_FLAG_DMA_SYNC_DEV |\
+                                PP_FLAG_PAGE_FRAG)
+
+/*
+ * Fast allocation side cache array/stack
+ *
+ * The cache size and refill watermark are related to the network
+ * use-case.  The NAPI budget is 64 packets.  After a NAPI poll the RX
+ * ring is usually refilled and the max consumed elements will be 64,
+ * thus a natural max size of objects needed in the cache.
+ *
+ * Keeping room for more objects is due to the XDP_DROP use-case.  As
+ * XDP_DROP allows the opportunity to recycle objects directly into
+ * this array, as it shares the same softirq/NAPI protection.  If
+ * cache is already full (or partly full) then the XDP_DROP recycles
+ * would have to take a slower code path.
+ */
+#define PP_ALLOC_CACHE_SIZE    128
+#define PP_ALLOC_CACHE_REFILL  64
+struct pp_alloc_cache {
+       u32 count;
+       struct page *cache[PP_ALLOC_CACHE_SIZE];
+};
+
+/**
+ * struct page_pool_params - page pool parameters
+ * @flags:     PP_FLAG_DMA_MAP, PP_FLAG_DMA_SYNC_DEV, PP_FLAG_PAGE_FRAG
+ * @order:     2^order pages on allocation
+ * @pool_size: size of the ptr_ring
+ * @nid:       NUMA node id to allocate pages from
+ * @dev:       device, for DMA pre-mapping purposes
+ * @napi:      NAPI which is the sole consumer of pages, otherwise NULL
+ * @dma_dir:   DMA mapping direction
+ * @max_len:   max DMA sync memory size for PP_FLAG_DMA_SYNC_DEV
+ * @offset:    DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
+ */
+struct page_pool_params {
+       unsigned int    flags;
+       unsigned int    order;
+       unsigned int    pool_size;
+       int             nid;
+       struct device   *dev;
+       struct napi_struct *napi;
+       enum dma_data_direction dma_dir;
+       unsigned int    max_len;
+       unsigned int    offset;
+/* private: used by test code only */
+       void (*init_callback)(struct page *page, void *arg);
+       void *init_arg;
+};
+
+#ifdef CONFIG_PAGE_POOL_STATS
+/**
+ * struct page_pool_alloc_stats - allocation statistics
+ * @fast:      successful fast path allocations
+ * @slow:      slow path order-0 allocations
+ * @slow_high_order: slow path high order allocations
+ * @empty:     ptr ring is empty, so a slow path allocation was forced
+ * @refill:    an allocation which triggered a refill of the cache
+ * @waive:     pages obtained from the ptr ring that cannot be added to
+ *             the cache due to a NUMA mismatch
+ */
+struct page_pool_alloc_stats {
+       u64 fast;
+       u64 slow;
+       u64 slow_high_order;
+       u64 empty;
+       u64 refill;
+       u64 waive;
+};
+
+/**
+ * struct page_pool_recycle_stats - recycling (freeing) statistics
+ * @cached:    recycling placed page in the page pool cache
+ * @cache_full:        page pool cache was full
+ * @ring:      page placed into the ptr ring
+ * @ring_full: page released from page pool because the ptr ring was full
+ * @released_refcnt:   page released (and not recycled) because refcnt > 1
+ */
+struct page_pool_recycle_stats {
+       u64 cached;
+       u64 cache_full;
+       u64 ring;
+       u64 ring_full;
+       u64 released_refcnt;
+};
+
+/**
+ * struct page_pool_stats - combined page pool use statistics
+ * @alloc_stats:       see struct page_pool_alloc_stats
+ * @recycle_stats:     see struct page_pool_recycle_stats
+ *
+ * Wrapper struct for combining page pool stats with different storage
+ * requirements.
+ */
+struct page_pool_stats {
+       struct page_pool_alloc_stats alloc_stats;
+       struct page_pool_recycle_stats recycle_stats;
+};
+#endif
+
+struct page_pool {
+       struct page_pool_params p;
+
+       struct delayed_work release_dw;
+       void (*disconnect)(void *pool);
+       unsigned long defer_start;
+       unsigned long defer_warn;
+
+       u32 pages_state_hold_cnt;
+       unsigned int frag_offset;
+       struct page *frag_page;
+       long frag_users;
+
+#ifdef CONFIG_PAGE_POOL_STATS
+       /* these stats are incremented while in softirq context */
+       struct page_pool_alloc_stats alloc_stats;
+#endif
+       u32 xdp_mem_id;
+
+       /*
+        * Data structure for allocation side
+        *
+        * The drivers' allocation side usually already performs some kind
+        * of resource protection.  Piggyback on this protection, and
+        * require the driver to protect the allocation side.
+        *
+        * For NIC drivers this means allocating a page_pool per
+        * RX-queue, as the RX-queue is already protected by
+        * softirq/BH scheduling, and napi_schedule() guarantees
+        * that a single napi_struct will only be scheduled
+        * on a single CPU (see napi_schedule()).
+        */
+       struct pp_alloc_cache alloc ____cacheline_aligned_in_smp;
+
+       /* Data structure for storing recycled pages.
+        *
+        * Returning/freeing pages is more complicated synchronization
+        * wise, because frees can happen on remote CPUs, with no
+        * association with allocation resource.
+        *
+        * Use ptr_ring, as it separates consumer and producer
+        * efficiently, in a way that doesn't bounce cache-lines.
+        *
+        * TODO: Implement bulk return pages into this structure.
+        */
+       struct ptr_ring ring;
+
+#ifdef CONFIG_PAGE_POOL_STATS
+       /* recycle stats are per-cpu to avoid locking */
+       struct page_pool_recycle_stats __percpu *recycle_stats;
+#endif
+       atomic_t pages_state_release_cnt;
+
+       /* A page_pool is strictly tied to a single RX-queue being
+        * protected by NAPI, due to above pp_alloc_cache. This
+        * refcnt serves to simplify the drivers' error handling.
+        */
+       refcount_t user_cnt;
+
+       u64 destroy_cnt;
+};
+
+struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
+struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
+                                 unsigned int size, gfp_t gfp);
+bool page_pool_return_skb_page(struct page *page, bool napi_safe);
+
+struct page_pool *page_pool_create(const struct page_pool_params *params);
+
+struct xdp_mem_info;
+
+#ifdef CONFIG_PAGE_POOL
+void page_pool_unlink_napi(struct page_pool *pool);
+void page_pool_destroy(struct page_pool *pool);
+void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
+                          struct xdp_mem_info *mem);
+void page_pool_put_page_bulk(struct page_pool *pool, void **data,
+                            int count);
+#else
+static inline void page_pool_unlink_napi(struct page_pool *pool)
+{
+}
+
+static inline void page_pool_destroy(struct page_pool *pool)
+{
+}
+
+static inline void page_pool_use_xdp_mem(struct page_pool *pool,
+                                        void (*disconnect)(void *),
+                                        struct xdp_mem_info *mem)
+{
+}
+
+static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data,
+                                          int count)
+{
+}
+#endif
+
+void page_pool_put_defragged_page(struct page_pool *pool, struct page *page,
+                                 unsigned int dma_sync_size,
+                                 bool allow_direct);
+
+static inline bool is_page_pool_compiled_in(void)
+{
+#ifdef CONFIG_PAGE_POOL
+       return true;
+#else
+       return false;
+#endif
+}
+
+/* Caller must provide appropriate safe context, e.g. NAPI. */
+void page_pool_update_nid(struct page_pool *pool, int new_nid);
+
+#endif /* _NET_PAGE_POOL_TYPES_H */
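
For reference, a sketch of how these types are typically filled in at
ring setup time (field values and the foo_* naming are illustrative
only; page_pool_create() is the real entry point):

static struct page_pool *foo_create_pool(struct device *dev, int nid)
{
	struct page_pool_params pp_params = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,		/* order-0 pages */
		.pool_size	= 256,		/* ptr_ring size */
		.nid		= nid,
		.dev		= dev,		/* used for DMA mapping */
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= PAGE_SIZE,	/* DMA sync length */
		.offset		= 0,
	};

	/* Returns an ERR_PTR() on failure */
	return page_pool_create(&pp_params);
}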
index ca534501158b7ee2bc6f4e16921218da11ee21c5..6834356b2d2aea55127d377601f2c56106a01a0d 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/tracepoint.h>
 
 #include <trace/events/mmflags.h>
-#include <net/page_pool.h>
+#include <net/page_pool/types.h>
 
 TRACE_EVENT(page_pool_release,
 
index 0aac76c13fd4abfcd496f0743bd6e5e3b7394664..57a7a64b84ede57b842c04a4adef34725858d5d2 100644 (file)
@@ -15,7 +15,7 @@
 #include <net/sock.h>
 #include <net/tcp.h>
 #include <net/net_namespace.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
 #include <linux/error-injection.h>
 #include <linux/smp.h>
 #include <linux/sock_diag.h>
index 5d615a1697187341f14e0b09510acd72cfe8ca25..cd28c1f14002b24610093d287f0a852ac15d70db 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/slab.h>
 #include <linux/device.h>
 
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
 #include <net/xdp.h>
 
 #include <linux/dma-direction.h>
index c6f98245582cd4dd01a7c4f5708163122500a4f0..d3bed964123c9c61b553c6347d3b789bc120436c 100644 (file)
@@ -73,7 +73,7 @@
 #include <net/mpls.h>
 #include <net/mptcp.h>
 #include <net/mctp.h>
-#include <net/page_pool.h>
+#include <net/page_pool/types.h>
 #include <net/dropreason.h>
 
 #include <linux/uaccess.h>
index 8362130bf085d53d3c5f18bc00763718db8cad5d..a70670fe9a2dc8df69d304f7e2535f32dacf2d79 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/idr.h>
 #include <linux/rhashtable.h>
 #include <linux/bug.h>
-#include <net/page_pool.h>
+#include <net/page_pool/helpers.h>
 
 #include <net/xdp.h>
 #include <net/xdp_priv.h> /* struct xdp_mem_allocator */