/* SPDX-License-Identifier: GPL-2.0
 *
 * Copyright (C) 2016 Red Hat, Inc.
 */

/**
 * DOC: page_pool allocator
 *
 * The page_pool allocator is optimized for recycling pages or page fragments
 * used by skb packets and xdp frames.
 *
 * Basic use involves replacing any alloc_pages() calls with page_pool_alloc(),
 * which allocates memory with or without page splitting depending on the
 * requested memory size.
 *
 * If the driver knows that it always requires full pages or its allocations
 * are always smaller than half a page, it can use one of the more specific
 * API calls:
 *
 * 1. page_pool_alloc_pages(): allocate memory without page splitting when the
 * driver knows that the memory it needs is always bigger than half of the page
 * allocated from the page pool. There is no cache line dirtying for
 * 'struct page' when a page is recycled back to the page pool.
 *
 * 2. page_pool_alloc_frag(): allocate memory with page splitting when the
 * driver knows that the memory it needs is always smaller than or equal to
 * half of the page allocated from the page pool. Page splitting enables memory
 * saving and thus avoids TLB/cache misses for data access, but there is also
 * some cost to implement page splitting, mainly some cache line
 * dirtying/bouncing for 'struct page' and an atomic operation on
 * page->pp_ref_count.
 *
 * The API keeps track of in-flight pages in order to let API users know when
 * it is safe to free a page_pool object. The API users must call
 * page_pool_put_page() or page_pool_free_va() to free the page_pool object,
 * or attach the page_pool object to a page_pool-aware object like skbs marked
 * with skb_mark_for_recycle().
 *
 * page_pool_put_page() may be called multiple times on the same page if a page
 * is split into multiple fragments. For the last fragment, it will either
 * recycle the page, or in case of page->_refcount > 1, it will release the DMA
 * mapping and in-flight state accounting.
 *
 * dma_sync_single_range_for_device() is only called for the last fragment when
 * page_pool is created with the PP_FLAG_DMA_SYNC_DEV flag, so it depends on
 * the last freed fragment to do the sync_for_device operation for all
 * fragments in the same page when a page is split. The API user must set up
 * pool->p.max_len and pool->p.offset correctly and ensure that
 * page_pool_put_page() is called with dma_sync_size being -1 for the
 * fragment API.
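 *
 * A minimal usage sketch (assuming the page_pool_create()/page_pool_destroy()
 * API and struct page_pool_params from <net/page_pool/types.h>; "dev" and
 * "skb" stand in for the driver's DMA device and the received sk_buff)::
 *
 *	struct page_pool_params pp_params = {
 *		.order		= 0,
 *		.pool_size	= 256,
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= dev,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *		.max_len	= PAGE_SIZE,
 *		.offset		= 0,
 *	};
 *	struct page_pool *pool = page_pool_create(&pp_params);
 *	struct page *page;
 *
 *	page = page_pool_dev_alloc_pages(pool);		// Rx refill
 *
 *	page_pool_put_full_page(pool, page, false);	// recycle on completion, or
 *	skb_mark_for_recycle(skb);			// let a page_pool-aware skb do it
 *
 *	page_pool_destroy(pool);	// teardown once in-flight pages are returned
 */
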
#ifndef _NET_PAGE_POOL_HELPERS_H
#define _NET_PAGE_POOL_HELPERS_H

#include <linux/dma-mapping.h>

#include <net/page_pool/types.h>

#ifdef CONFIG_PAGE_POOL_STATS
/* Deprecated driver-facing API, use netlink instead */
int page_pool_ethtool_stats_get_count(void);
u8 *page_pool_ethtool_stats_get_strings(u8 *data);
u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats);

bool page_pool_get_stats(const struct page_pool *pool,
			 struct page_pool_stats *stats);
#else
static inline int page_pool_ethtool_stats_get_count(void)
{
	return 0;
}

static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data)
{
	return data;
}

static inline u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats)
{
	return data;
}
#endif

/**
 * page_pool_dev_alloc_pages() - allocate a page.
 * @pool: pool from which to allocate
 *
 * Get a page from the page allocator or page_pool caches.
 */
static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_pages(pool, gfp);
}

/**
 * page_pool_dev_alloc_frag() - allocate a page fragment.
 * @pool: pool from which to allocate
 * @offset: offset to the allocated page
 * @size: requested size
 *
 * Get a page fragment from the page allocator or page_pool caches.
 *
 * Return:
 * Return allocated page fragment, otherwise return NULL.
 */
static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
						    unsigned int *offset,
						    unsigned int size)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_frag(pool, offset, size, gfp);
}

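/* A short usage sketch for the fragment API (illustrative; "pool" is assumed
 * to be a page_pool already created by the driver):
 *
 *	unsigned int offset;
 *	struct page *page;
 *
 *	page = page_pool_dev_alloc_frag(pool, &offset, 1024);
 *	if (!page)
 *		return -ENOMEM;
 *	// the 1024-byte buffer starts at page_address(page) + offset
 */
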
static inline struct page *page_pool_alloc(struct page_pool *pool,
					   unsigned int *offset,
					   unsigned int *size, gfp_t gfp)
{
	unsigned int max_size = PAGE_SIZE << pool->p.order;
	struct page *page;

	if ((*size << 1) > max_size) {
		*size = max_size;
		*offset = 0;
		return page_pool_alloc_pages(pool, gfp);
	}

	page = page_pool_alloc_frag(pool, offset, *size, gfp);
	if (unlikely(!page))
		return NULL;

	/* There is very likely not enough space for another fragment, so append
	 * the remaining size to the current fragment to avoid the truesize
	 * underestimate problem.
	 */
	if (pool->frag_offset + *size > max_size) {
		*size = max_size - *offset;
		pool->frag_offset = max_size;
	}

	return page;
}

/**
 * page_pool_dev_alloc() - allocate a page or a page fragment.
 * @pool: pool from which to allocate
 * @offset: offset to the allocated page
 * @size: in as the requested size, out as the allocated size
 *
 * Get a page or a page fragment from the page allocator or page_pool caches
 * depending on the requested size, in order to allocate memory with the least
 * memory utilization and performance penalty.
 *
 * Return:
 * Return allocated page or page fragment, otherwise return NULL.
 */
static inline struct page *page_pool_dev_alloc(struct page_pool *pool,
					       unsigned int *offset,
					       unsigned int *size)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc(pool, offset, size, gfp);
}

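/* A sketch of the in/out @size semantics (illustrative; "pool" is assumed to
 * exist and 2048 is an arbitrary requested size):
 *
 *	unsigned int offset, size = 2048;
 *	struct page *page;
 *
 *	page = page_pool_dev_alloc(pool, &offset, &size);
 *	// on return, size may have grown (e.g. to a full page) and the usable
 *	// buffer is page_address(page) + offset, "size" bytes long
 */
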
static inline void *page_pool_alloc_va(struct page_pool *pool,
				       unsigned int *size, gfp_t gfp)
{
	unsigned int offset;
	struct page *page;

	/* Mask off __GFP_HIGHMEM to ensure we can use page_address() */
	page = page_pool_alloc(pool, &offset, size, gfp & ~__GFP_HIGHMEM);
	if (unlikely(!page))
		return NULL;

	return page_address(page) + offset;
}

/**
 * page_pool_dev_alloc_va() - allocate a page or a page fragment and return
 *			      its va.
 * @pool: pool from which to allocate
 * @size: in as the requested size, out as the allocated size
 *
 * This is just a thin wrapper around the page_pool_alloc() API, and it
 * returns the va of the allocated page or page fragment.
 *
 * Return:
 * Return the va for the allocated page or page fragment, otherwise return NULL.
 */
static inline void *page_pool_dev_alloc_va(struct page_pool *pool,
					   unsigned int *size)
{
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	return page_pool_alloc_va(pool, size, gfp);
}

/**
 * page_pool_get_dma_dir() - Retrieve the stored DMA direction.
 * @pool: pool from which page was allocated
 *
 * Get the stored DMA direction. A driver might decide to store this locally
 * and avoid the extra cache line from page_pool to determine the direction.
 */
static inline enum dma_data_direction
page_pool_get_dma_dir(const struct page_pool *pool)
{
	return pool->p.dma_dir;
}

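/* As the comment above suggests, a driver may cache the direction in its own
 * structure to avoid touching the page_pool cache line on the fast path
 * (sketch; the "rxq" structure is hypothetical):
 *
 *	rxq->dma_dir = page_pool_get_dma_dir(rxq->page_pool);
 *	...
 *	dma_sync_single_for_cpu(dev, dma, len, rxq->dma_dir);
 */
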
/**
 * page_pool_fragment_page() - split a fresh page into fragments
 * @page: page to split
 * @nr: references to set
 *
 * pp_ref_count represents the number of outstanding references to the page,
 * which will be freed using page_pool APIs (rather than page allocator APIs
 * like put_page()). Such references are usually held by page_pool-aware
 * objects like skbs marked for page pool recycling.
 *
 * This helper allows the caller to take (set) multiple references to a
 * freshly allocated page. The page must be freshly allocated (have a
 * pp_ref_count of 1). This is commonly done by drivers and
 * "fragment allocators" to save atomic operations - either when they know
 * upfront how many references they will need; or to take MAX references and
 * return the unused ones with a single atomic dec(), instead of performing
 * multiple atomic inc() operations.
 */
static inline void page_pool_fragment_page(struct page *page, long nr)
{
	atomic_long_set(&page->pp_ref_count, nr);
}

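/* A sketch of the "take MAX references, return the unused ones" pattern
 * described above (illustrative; MAX_FRAGS and "used" are hypothetical
 * driver-side values):
 *
 *	page_pool_fragment_page(page, MAX_FRAGS);
 *	... hand out "used" fragments ...
 *	page_pool_unref_page(page, MAX_FRAGS - used);	// single atomic sub
 */
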
static inline long page_pool_unref_page(struct page *page, long nr)
{
	long ret;

	/* If nr == pp_ref_count then we have cleared all remaining
	 * references to the page:
	 * 1. 'nr == 1': no need to actually overwrite it.
	 * 2. 'nr != 1': overwrite it with one, which is the rare case
	 *               for pp_ref_count draining.
	 *
	 * The main advantage of doing this is that not only do we avoid an
	 * atomic update (an atomic_read is generally a much cheaper operation
	 * than an atomic update, especially when dealing with a page that may
	 * be referenced by only 2 or 3 users), but we also unify the
	 * pp_ref_count handling by ensuring all pages start partitioned into
	 * only 1 piece, and only overwrite it when the page is partitioned
	 * into more than one piece.
	 */
	if (atomic_long_read(&page->pp_ref_count) == nr) {
		/* As we have ensured nr is always one for the constant case
		 * using the BUILD_BUG_ON(), we only need to handle the
		 * non-constant case here for pp_ref_count draining, which is
		 * a rare case.
		 */
		BUILD_BUG_ON(__builtin_constant_p(nr) && nr != 1);
		if (!__builtin_constant_p(nr))
			atomic_long_set(&page->pp_ref_count, 1);

		return 0;
	}

	ret = atomic_long_sub_return(nr, &page->pp_ref_count);
	WARN_ON(ret < 0);

	/* We are the last user here too; reset pp_ref_count back to 1 to
	 * ensure all pages have been partitioned into 1 piece initially.
	 * This should be the rare case where the last two fragment users
	 * call page_pool_unref_page() concurrently.
	 */
	if (unlikely(!ret))
		atomic_long_set(&page->pp_ref_count, 1);

	return ret;
}

static inline void page_pool_ref_page(struct page *page)
{
	atomic_long_inc(&page->pp_ref_count);
}

static inline bool page_pool_is_last_ref(struct page *page)
{
	/* If page_pool_unref_page() returns 0, we were the last user */
	return page_pool_unref_page(page, 1) == 0;
}

/**
 * page_pool_put_page() - release a reference to a page pool page
 * @pool: pool from which page was allocated
 * @page: page to release a reference on
 * @dma_sync_size: how much of the page may have been touched by the device
 * @allow_direct: released by the consumer, allow lockless caching
 *
 * The outcome of this depends on the page refcnt. If the driver bumps
 * the refcnt to > 1, this will unmap the page. If the page refcnt is 1,
 * the allocator owns the page and will try to recycle it in one of the pool
 * caches. If PP_FLAG_DMA_SYNC_DEV is set, the page will be synced for_device
 * using dma_sync_single_range_for_device().
 */
static inline void page_pool_put_page(struct page_pool *pool,
				      struct page *page,
				      unsigned int dma_sync_size,
				      bool allow_direct)
{
	/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
	 * allow registering MEM_TYPE_PAGE_POOL, but shield the linker.
	 */
#ifdef CONFIG_PAGE_POOL
	if (!page_pool_is_last_ref(page))
		return;

	page_pool_put_unrefed_page(pool, page, dma_sync_size, allow_direct);
#endif
}

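/* A sketch of releasing an Rx buffer once the received length is known
 * (illustrative; "pkt_len" is a hypothetical driver value). For split pages,
 * pass -1 as dma_sync_size so the whole configured pool->p.max_len area is
 * synced when the last fragment is freed:
 *
 *	// sync only the bytes the device wrote, outside of NAPI context
 *	page_pool_put_page(pool, page, pkt_len, false);
 */
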
/**
 * page_pool_put_full_page() - release a reference on a page pool page
 * @pool: pool from which page was allocated
 * @page: page to release a reference on
 * @allow_direct: released by the consumer, allow lockless caching
 *
 * Similar to page_pool_put_page(), but will DMA sync the entire memory area
 * as configured in &page_pool_params.max_len.
 */
static inline void page_pool_put_full_page(struct page_pool *pool,
					   struct page *page, bool allow_direct)
{
	page_pool_put_page(pool, page, -1, allow_direct);
}

/**
 * page_pool_recycle_direct() - release a reference on a page pool page
 * @pool: pool from which page was allocated
 * @page: page to release a reference on
 *
 * Similar to page_pool_put_full_page(), but the caller must guarantee a safe
 * context (e.g. NAPI), since it will recycle the page directly into the pool
 * fast cache.
 */
static inline void page_pool_recycle_direct(struct page_pool *pool,
					    struct page *page)
{
	page_pool_put_full_page(pool, page, true);
}

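/* A sketch of the direct-recycle fast path (illustrative; assumes the call is
 * made from the driver's NAPI poll routine for this pool):
 *
 *	// e.g. after copying a small packet out of the buffer in NAPI poll
 *	page_pool_recycle_direct(pool, page);
 */
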
#define PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA \
		(sizeof(dma_addr_t) > sizeof(unsigned long))

/**
 * page_pool_free_va() - free a va into the page_pool
 * @pool: pool from which va was allocated
 * @va: va to be freed
 * @allow_direct: freed by the consumer, allow lockless caching
 *
 * Free a va allocated from page_pool_alloc_va().
 */
static inline void page_pool_free_va(struct page_pool *pool, void *va,
				     bool allow_direct)
{
	page_pool_put_page(pool, virt_to_head_page(va), -1, allow_direct);
}

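/* A sketch pairing the va-based alloc/free helpers (illustrative; "pool" and
 * the requested size are driver-chosen):
 *
 *	unsigned int size = 512;
 *	void *va = page_pool_dev_alloc_va(pool, &size);
 *
 *	if (va)
 *		page_pool_free_va(pool, va, true);
 */
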
/**
 * page_pool_get_dma_addr() - Retrieve the stored DMA address.
 * @page: page allocated from a page pool
 *
 * Fetch the DMA address of the page. The page pool to which the page belongs
 * must have been created with PP_FLAG_DMA_MAP.
 */
static inline dma_addr_t page_pool_get_dma_addr(const struct page *page)
{
	dma_addr_t ret = page->dma_addr;

	if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA)
		ret <<= PAGE_SHIFT;

	return ret;
}

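/* A sketch of programming a receive descriptor with the stored DMA address
 * (illustrative; the "desc" layout and "rx_offset" are hypothetical driver
 * details):
 *
 *	dma_addr_t dma = page_pool_get_dma_addr(page);
 *
 *	desc->addr = cpu_to_le64(dma + rx_offset);
 */
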
static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
{
	if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) {
		page->dma_addr = addr >> PAGE_SHIFT;

		/* We assume page alignment to shave off bottom bits;
		 * if this "compression" doesn't work we need to drop.
		 */
		return addr != (dma_addr_t)page->dma_addr << PAGE_SHIFT;
	}

	page->dma_addr = addr;
	return false;
}

/**
 * page_pool_dma_sync_for_cpu - sync Rx page for CPU after it's written by HW
 * @pool: &page_pool the @page belongs to
 * @page: page to sync
 * @offset: offset from page start to "hard" start if using PP frags
 * @dma_sync_size: size of the data written to the page
 *
 * Can be used as a shorthand to sync Rx pages before accessing them in the
 * driver. Caller must ensure the pool was created with ``PP_FLAG_DMA_MAP``.
 * Note that this version performs DMA sync unconditionally, even if the
 * associated PP doesn't perform sync-for-device.
 */
static inline void page_pool_dma_sync_for_cpu(const struct page_pool *pool,
					      const struct page *page,
					      u32 offset, u32 dma_sync_size)
{
	dma_sync_single_range_for_cpu(pool->p.dev,
				      page_pool_get_dma_addr(page),
				      offset + pool->p.offset, dma_sync_size,
				      page_pool_get_dma_dir(pool));
}

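/* A sketch of syncing a received buffer before the CPU parses it
 * (illustrative; "pkt_len" is the length reported by a hypothetical device):
 *
 *	page_pool_dma_sync_for_cpu(pool, page, 0, pkt_len);
 *	// the CPU can now safely read the packet contents
 */
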
static inline bool page_pool_put(struct page_pool *pool)
{
	return refcount_dec_and_test(&pool->user_cnt);
}

static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
{
	if (unlikely(pool->p.nid != new_nid))
		page_pool_update_nid(pool, new_nid);
}

#endif /* _NET_PAGE_POOL_HELPERS_H */