Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at master 237 lines 6.5 kB view raw
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Device memory TCP support
 *
 * Authors:	Mina Almasry <almasrymina@google.com>
 *		Willem de Bruijn <willemb@google.com>
 *		Kaiyuan Zhang <kaiyuanz@google.com>
 *
 */
#ifndef _NET_DEVMEM_H
#define _NET_DEVMEM_H

#include <net/netmem.h>
#include <net/netdev_netlink.h>

struct netlink_ext_ack;

/* State for one dma-buf bound to a net_device: the attachment, its mapped
 * scatterlist, a genpool carved out of the dmabuf for allocating net_iovs,
 * and the refcounting that keeps the mapping alive while in use.
 */
struct net_devmem_dmabuf_binding {
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attachment;
	struct sg_table *sgt;
	struct net_device *dev;
	struct gen_pool *chunk_pool;
	/* Protect dev */
	struct mutex lock;

	/* The user holds a ref (via the netlink API) for as long as they want
	 * the binding to remain alive. Each page pool using this binding holds
	 * a ref to keep the binding alive. The page_pool does not release the
	 * ref until all the net_iovs allocated from this binding are released
	 * back to the page_pool.
	 *
	 * The binding undos itself and unmaps the underlying dmabuf once all
	 * those refs are dropped and the binding is no longer desired or in
	 * use.
	 *
	 * net_devmem_get_net_iov() on dmabuf net_iovs will increment this
	 * reference, making sure that the binding remains alive until all the
	 * net_iovs are no longer used. net_iovs allocated from this binding
	 * that are stuck in the TX path for any reason (such as awaiting
	 * retransmits) hold a reference to the binding until the skb holding
	 * them is freed.
	 */
	struct percpu_ref ref;

	/* The list of bindings currently active. Used for netlink to notify us
	 * of the user dropping the bind.
	 */
	struct list_head list;

	/* rxq's this binding is active on. */
	struct xarray bound_rxqs;

	/* ID of this binding. Globally unique to all bindings currently
	 * active.
	 */
	u32 id;

	/* DMA direction, FROM_DEVICE for Rx binding, TO_DEVICE for Tx.
	 */
	enum dma_data_direction direction;

	/* Array of net_iov pointers for this binding, sorted by virtual
	 * address. This array is convenient to map the virtual addresses to
	 * net_iovs in the TX path.
	 */
	struct net_iov **tx_vec;

	/* Deferred-work context used to tear the binding down off the fast
	 * path (see __net_devmem_dmabuf_binding_free()).
	 */
	struct work_struct unbind_w;
};

#if defined(CONFIG_NET_DEVMEM)
/* Owner of the dma-buf chunks inserted into the gen pool. Each scatterlist
 * entry from the dmabuf is inserted into the genpool as a chunk, and needs
 * this owner struct to keep track of some metadata necessary to create
 * allocations from this chunk.
 */
struct dmabuf_genpool_chunk_owner {
	struct net_iov_area area;
	struct net_devmem_dmabuf_binding *binding;

	/* dma_addr of the start of the chunk. */
	dma_addr_t base_dma_addr;
};

/* Binding lifecycle: create/lookup/destroy a dmabuf binding and attach it
 * to a specific rx queue. Implemented in net/core/devmem.c (presumably;
 * the definitions are outside this header).
 */
void __net_devmem_dmabuf_binding_free(struct work_struct *wq);
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev,
		       struct device *dma_dev,
		       enum dma_data_direction direction,
		       unsigned int dmabuf_fd, struct netdev_nl_sock *priv,
		       struct netlink_ext_ack *extack);
struct net_devmem_dmabuf_binding *net_devmem_lookup_dmabuf(u32 id);
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding);
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack);

/* Recover the genpool chunk owner a net_iov was allocated from, by walking
 * back from its embedded net_iov_area via container_of().
 */
static inline struct dmabuf_genpool_chunk_owner *
net_devmem_iov_to_chunk_owner(const struct net_iov *niov)
{
	struct net_iov_area *owner = net_iov_owner(niov);

	return container_of(owner, struct dmabuf_genpool_chunk_owner, area);
}

/* Binding that a given dmabuf net_iov belongs to. */
static inline struct net_devmem_dmabuf_binding *
net_devmem_iov_binding(const struct net_iov *niov)
{
	return net_devmem_iov_binding(niov) ? 0 : 0; /* placeholder */
}

static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
{
	return net_devmem_iov_binding(niov)->id;
}

/* Virtual address of a net_iov within its binding: the owning area's base
 * address plus the iov's index scaled by PAGE_SIZE.
 */
static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	struct net_iov_area *owner = net_iov_owner(niov);

	return owner->base_virtual +
	       ((unsigned long)net_iov_idx(niov) << PAGE_SHIFT);
}

/* Take a reference on the binding. Returns false if the percpu_ref has
 * already been killed, i.e. the binding is going away.
 */
static inline bool
net_devmem_dmabuf_binding_get(struct net_devmem_dmabuf_binding *binding)
{
	return percpu_ref_tryget(&binding->ref);
}

/* Drop a reference taken with net_devmem_dmabuf_binding_get(). */
static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
	percpu_ref_put(&binding->ref);
}

/* Per-net_iov refs; pin/unpin the underlying binding (see the comment on
 * struct net_devmem_dmabuf_binding::ref above).
 */
void net_devmem_get_net_iov(struct net_iov *niov);
void net_devmem_put_net_iov(struct net_iov *niov);

/* Allocate/free one net_iov from the binding's chunk pool. */
struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding);
void net_devmem_free_dmabuf(struct net_iov *ppiov);

/* TX-path helpers: resolve a socket's binding by dmabuf id, and map a
 * virtual address within a binding to the net_iov covering it.
 */
struct net_devmem_dmabuf_binding *
net_devmem_get_binding(struct sock *sk, unsigned int dmabuf_id);
struct net_iov *
net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding, size_t addr,
		       size_t *off, size_t *size);

#else
/* !CONFIG_NET_DEVMEM: no-op/stub versions so callers compile without the
 * feature. Fallible entry points report -EOPNOTSUPP; lookups return NULL.
 */
struct net_devmem_dmabuf_binding;

static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
}

static inline void net_devmem_get_net_iov(struct net_iov *niov)
{
}

static inline void net_devmem_put_net_iov(struct net_iov *niov)
{
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev,
		       struct device *dma_dev,
		       enum dma_data_direction direction,
		       unsigned int dmabuf_fd,
		       struct netdev_nl_sock *priv,
		       struct netlink_ext_ack *extack)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct net_devmem_dmabuf_binding *net_devmem_lookup_dmabuf(u32 id)
{
	return NULL;
}

static inline void
net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
}

static inline int
net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				struct net_devmem_dmabuf_binding *binding,
				struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}

static inline struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	return NULL;
}

static inline void net_devmem_free_dmabuf(struct net_iov *ppiov)
{
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	return 0;
}

static inline u32 net_devmem_iov_binding_id(const struct net_iov *niov)
{
	return 0;
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_get_binding(struct sock *sk, unsigned int dmabuf_id)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct net_iov *
net_devmem_get_niov_at(struct net_devmem_dmabuf_binding *binding, size_t addr,
		       size_t *off, size_t *size)
{
	return NULL;
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_iov_binding(const struct net_iov *niov)
{
	return NULL;
}
#endif /* CONFIG_NET_DEVMEM */

#endif /* _NET_DEVMEM_H */