/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Device memory TCP support
 */
#ifndef _NET_DEVMEM_H
#define _NET_DEVMEM_H

struct netlink_ext_ack;

struct net_devmem_dmabuf_binding {
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attachment;
	struct net_device *dev;
	struct gen_pool *chunk_pool;

	/* The user holds a ref (via the netlink API) for as long as they want
	 * the binding to remain alive. Each page pool using this binding holds
	 * a ref to keep the binding alive. Each allocated net_iov holds a
	 * ref.
	 *
	 * The binding undoes itself and unmaps the underlying dmabuf once all
	 * those refs are dropped and the binding is no longer desired or in
	 * use. See the lifecycle sketch below this struct.
	 */
	refcount_t ref;

	/* The list of bindings currently active. Used for netlink to notify us
	 * of the user dropping the bind.
	 */
	struct list_head list;

	/* rxq's this binding is active on. */
	struct xarray bound_rxqs;

	/* ID of this binding. Globally unique to all bindings currently
	 * active.
	 */
	u32 id;
};
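
/* Lifecycle sketch (illustrative only, not part of the declarations below):
 * a component that needs the binding to stay alive takes a reference and
 * drops it when done, e.g.:
 *
 *	net_devmem_dmabuf_binding_get(binding);
 *	...				// binding and its dmabuf stay mapped
 *	net_devmem_dmabuf_binding_put(binding);	// final put frees the binding
 */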

#if defined(CONFIG_NET_DEVMEM)
/* Owner of the dma-buf chunks inserted into the gen pool. Each scatterlist
 * entry from the dmabuf is inserted into the genpool as a chunk, and needs
 * this owner struct to keep track of some metadata necessary to create
 * allocations from this chunk.
 */
struct dmabuf_genpool_chunk_owner {
	/* Offset into the dma-buf where this chunk starts. */
	unsigned long base_virtual;

	/* dma_addr of the start of the chunk. */
	dma_addr_t base_dma_addr;

	/* Array of net_iovs for this chunk. */
	struct net_iov *niovs;

	struct net_devmem_dmabuf_binding *binding;
};

void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding);
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
		       struct netlink_ext_ack *extack);
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding);
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack);
void dev_dmabuf_uninstall(struct net_device *dev);
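
/* Caller-side sketch (hypothetical and simplified; the real callers live in
 * the netdev netlink code and handle locking and cleanup):
 *
 *	struct net_devmem_dmabuf_binding *binding;
 *	int err;
 *
 *	binding = net_devmem_bind_dmabuf(dev, dmabuf_fd, extack);
 *	if (IS_ERR(binding))
 *		return PTR_ERR(binding);
 *
 *	err = net_devmem_bind_dmabuf_to_queue(dev, rxq_idx, binding, extack);
 *	if (err)
 *		net_devmem_unbind_dmabuf(binding);
 */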

static inline struct dmabuf_genpool_chunk_owner *
net_iov_owner(const struct net_iov *niov)
{
	return niov->owner;
}

static inline unsigned int net_iov_idx(const struct net_iov *niov)
{
	return niov - net_iov_owner(niov)->niovs;
}

static inline struct net_devmem_dmabuf_binding *
net_iov_binding(const struct net_iov *niov)
{
	return net_iov_owner(niov)->binding;
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	struct dmabuf_genpool_chunk_owner *owner = net_iov_owner(niov);

	return owner->base_virtual +
	       ((unsigned long)net_iov_idx(niov) << PAGE_SHIFT);
}
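
/* Worked example (illustrative numbers only): with a 4K PAGE_SIZE, the
 * net_iov at index 3 of a chunk whose base_virtual is 0x200000 maps to
 * 0x200000 + (3 << PAGE_SHIFT) == 0x203000.
 */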

static inline u32 net_iov_binding_id(const struct net_iov *niov)
{
	return net_iov_owner(niov)->binding->id;
}

static inline void
net_devmem_dmabuf_binding_get(struct net_devmem_dmabuf_binding *binding)
{
	refcount_inc(&binding->ref);
}

static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
	if (!refcount_dec_and_test(&binding->ref))
		return;

	__net_devmem_dmabuf_binding_free(binding);
}

struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding);
void net_devmem_free_dmabuf(struct net_iov *ppiov);
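
/* Allocation sketch (illustrative; the assumed caller is whatever installs
 * this binding on a page pool). net_devmem_alloc_dmabuf() hands out one
 * net_iov backed by the dmabuf, and net_devmem_free_dmabuf() returns it:
 *
 *	struct net_iov *niov;
 *
 *	niov = net_devmem_alloc_dmabuf(binding);
 *	if (niov) {
 *		...			// hand the net_iov to the rx path
 *		net_devmem_free_dmabuf(niov);
 *	}
 */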

#else
struct net_devmem_dmabuf_binding;

static inline void
__net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding)
{
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
		       struct netlink_ext_ack *extack)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void
net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
}

static inline int
net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				struct net_devmem_dmabuf_binding *binding,
				struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}

static inline void dev_dmabuf_uninstall(struct net_device *dev)
{
}

static inline struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	return NULL;
}

static inline void net_devmem_free_dmabuf(struct net_iov *ppiov)
{
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	return 0;
}

static inline u32 net_iov_binding_id(const struct net_iov *niov)
{
	return 0;
}
#endif

#endif /* _NET_DEVMEM_H */