1/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
2/*
3 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
4 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
5 * Copyright (c) 2004, 2020 Intel Corporation. All rights reserved.
6 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
7 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
8 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
9 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
10 */
11
12#ifndef IB_VERBS_H
13#define IB_VERBS_H
14
15#include <linux/ethtool.h>
16#include <linux/types.h>
17#include <linux/device.h>
18#include <linux/bvec.h>
19#include <linux/dma-mapping.h>
20#include <linux/kref.h>
21#include <linux/list.h>
22#include <linux/rwsem.h>
23#include <linux/workqueue.h>
24#include <linux/irq_poll.h>
25#include <uapi/linux/if_ether.h>
26#include <net/ipv6.h>
27#include <net/ip.h>
28#include <linux/string.h>
29#include <linux/slab.h>
30#include <linux/netdevice.h>
31#include <linux/refcount.h>
32#include <linux/if_link.h>
33#include <linux/atomic.h>
34#include <linux/mmu_notifier.h>
35#include <linux/uaccess.h>
36#include <linux/cgroup_rdma.h>
37#include <linux/irqflags.h>
38#include <linux/preempt.h>
39#include <linux/dim.h>
40#include <uapi/rdma/ib_user_verbs.h>
41#include <rdma/rdma_counter.h>
42#include <rdma/restrack.h>
43#include <rdma/signature.h>
44#include <uapi/rdma/rdma_user_ioctl.h>
45#include <uapi/rdma/ib_user_ioctl_verbs.h>
46#include <linux/pci-tph.h>
47#include <rdma/frmr_pools.h>
48#include <linux/dma-buf.h>
49
50#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
51
52struct ib_umem_odp;
53struct ib_uqp_object;
54struct ib_usrq_object;
55struct ib_uwq_object;
56struct rdma_cm_id;
57struct ib_port;
58struct hw_stats_device_data;
59
60extern struct workqueue_struct *ib_wq;
61extern struct workqueue_struct *ib_comp_wq;
62extern struct workqueue_struct *ib_comp_unbound_wq;
63
64struct ib_ucq_object;
65
66__printf(2, 3) __cold
67void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
68__printf(2, 3) __cold
69void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
70__printf(2, 3) __cold
71void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
72__printf(2, 3) __cold
73void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
74__printf(2, 3) __cold
75void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
76__printf(2, 3) __cold
77void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
78__printf(2, 3) __cold
79void ibdev_info(const struct ib_device *ibdev, const char *format, ...);
80
81#if defined(CONFIG_DYNAMIC_DEBUG) || \
82 (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
83#define ibdev_dbg(__dev, format, args...) \
84 dynamic_ibdev_dbg(__dev, format, ##args)
85#else
86__printf(2, 3) __cold
87static inline
88void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
89#endif
90
91#define ibdev_level_ratelimited(ibdev_level, ibdev, fmt, ...) \
92do { \
93 static DEFINE_RATELIMIT_STATE(_rs, \
94 DEFAULT_RATELIMIT_INTERVAL, \
95 DEFAULT_RATELIMIT_BURST); \
96 if (__ratelimit(&_rs)) \
97 ibdev_level(ibdev, fmt, ##__VA_ARGS__); \
98} while (0)
99
100#define ibdev_emerg_ratelimited(ibdev, fmt, ...) \
101 ibdev_level_ratelimited(ibdev_emerg, ibdev, fmt, ##__VA_ARGS__)
102#define ibdev_alert_ratelimited(ibdev, fmt, ...) \
103 ibdev_level_ratelimited(ibdev_alert, ibdev, fmt, ##__VA_ARGS__)
104#define ibdev_crit_ratelimited(ibdev, fmt, ...) \
105 ibdev_level_ratelimited(ibdev_crit, ibdev, fmt, ##__VA_ARGS__)
106#define ibdev_err_ratelimited(ibdev, fmt, ...) \
107 ibdev_level_ratelimited(ibdev_err, ibdev, fmt, ##__VA_ARGS__)
108#define ibdev_warn_ratelimited(ibdev, fmt, ...) \
109 ibdev_level_ratelimited(ibdev_warn, ibdev, fmt, ##__VA_ARGS__)
110#define ibdev_notice_ratelimited(ibdev, fmt, ...) \
111 ibdev_level_ratelimited(ibdev_notice, ibdev, fmt, ##__VA_ARGS__)
112#define ibdev_info_ratelimited(ibdev, fmt, ...) \
113 ibdev_level_ratelimited(ibdev_info, ibdev, fmt, ##__VA_ARGS__)
114
115#if defined(CONFIG_DYNAMIC_DEBUG) || \
116 (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE))
117/* descriptor check is first to prevent flooding with "callbacks suppressed" */
118#define ibdev_dbg_ratelimited(ibdev, fmt, ...) \
119do { \
120 static DEFINE_RATELIMIT_STATE(_rs, \
121 DEFAULT_RATELIMIT_INTERVAL, \
122 DEFAULT_RATELIMIT_BURST); \
123 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
124 if (DYNAMIC_DEBUG_BRANCH(descriptor) && __ratelimit(&_rs)) \
125 __dynamic_ibdev_dbg(&descriptor, ibdev, fmt, \
126 ##__VA_ARGS__); \
127} while (0)
128#else
129__printf(2, 3) __cold
130static inline
131void ibdev_dbg_ratelimited(const struct ib_device *ibdev, const char *format, ...) {}
132#endif
133
134union ib_gid {
135 u8 raw[16];
136 struct {
137 __be64 subnet_prefix;
138 __be64 interface_id;
139 } global;
140};
141
142extern union ib_gid zgid;
143
144enum ib_gid_type {
145 IB_GID_TYPE_IB = IB_UVERBS_GID_TYPE_IB,
146 IB_GID_TYPE_ROCE = IB_UVERBS_GID_TYPE_ROCE_V1,
147 IB_GID_TYPE_ROCE_UDP_ENCAP = IB_UVERBS_GID_TYPE_ROCE_V2,
148 IB_GID_TYPE_SIZE
149};
150
151#define ROCE_V2_UDP_DPORT 4791
152struct ib_gid_attr {
153 struct net_device __rcu *ndev;
154 struct ib_device *device;
155 union ib_gid gid;
156 enum ib_gid_type gid_type;
157 u16 index;
158 u32 port_num;
159};
160
161enum {
162 /* set the local administered indication */
163 IB_SA_WELL_KNOWN_GUID = BIT_ULL(57) | 2,
164};
165
166enum rdma_transport_type {
167 RDMA_TRANSPORT_IB,
168 RDMA_TRANSPORT_IWARP,
169 RDMA_TRANSPORT_USNIC,
170 RDMA_TRANSPORT_USNIC_UDP,
171 RDMA_TRANSPORT_UNSPECIFIED,
172};
173
174enum rdma_protocol_type {
175 RDMA_PROTOCOL_IB,
176 RDMA_PROTOCOL_IBOE,
177 RDMA_PROTOCOL_IWARP,
178 RDMA_PROTOCOL_USNIC_UDP
179};
180
181__attribute_const__ enum rdma_transport_type
182rdma_node_get_transport(unsigned int node_type);
183
184enum rdma_network_type {
185 RDMA_NETWORK_IB,
186 RDMA_NETWORK_ROCE_V1,
187 RDMA_NETWORK_IPV4,
188 RDMA_NETWORK_IPV6
189};
190
191static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type network_type)
192{
193 if (network_type == RDMA_NETWORK_IPV4 ||
194 network_type == RDMA_NETWORK_IPV6)
195 return IB_GID_TYPE_ROCE_UDP_ENCAP;
196 else if (network_type == RDMA_NETWORK_ROCE_V1)
197 return IB_GID_TYPE_ROCE;
198 else
199 return IB_GID_TYPE_IB;
200}
201
202static inline enum rdma_network_type
203rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
204{
205 if (attr->gid_type == IB_GID_TYPE_IB)
206 return RDMA_NETWORK_IB;
207
208 if (attr->gid_type == IB_GID_TYPE_ROCE)
209 return RDMA_NETWORK_ROCE_V1;
210
211 if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
212 return RDMA_NETWORK_IPV4;
213 else
214 return RDMA_NETWORK_IPV6;
215}
216
217enum rdma_link_layer {
218 IB_LINK_LAYER_UNSPECIFIED,
219 IB_LINK_LAYER_INFINIBAND,
220 IB_LINK_LAYER_ETHERNET,
221};
222
223enum ib_device_cap_flags {
224 IB_DEVICE_RESIZE_MAX_WR = IB_UVERBS_DEVICE_RESIZE_MAX_WR,
225 IB_DEVICE_BAD_PKEY_CNTR = IB_UVERBS_DEVICE_BAD_PKEY_CNTR,
226 IB_DEVICE_BAD_QKEY_CNTR = IB_UVERBS_DEVICE_BAD_QKEY_CNTR,
227 IB_DEVICE_RAW_MULTI = IB_UVERBS_DEVICE_RAW_MULTI,
228 IB_DEVICE_AUTO_PATH_MIG = IB_UVERBS_DEVICE_AUTO_PATH_MIG,
229 IB_DEVICE_CHANGE_PHY_PORT = IB_UVERBS_DEVICE_CHANGE_PHY_PORT,
230 IB_DEVICE_UD_AV_PORT_ENFORCE = IB_UVERBS_DEVICE_UD_AV_PORT_ENFORCE,
231 IB_DEVICE_CURR_QP_STATE_MOD = IB_UVERBS_DEVICE_CURR_QP_STATE_MOD,
232 IB_DEVICE_SHUTDOWN_PORT = IB_UVERBS_DEVICE_SHUTDOWN_PORT,
233 /* IB_DEVICE_INIT_TYPE = IB_UVERBS_DEVICE_INIT_TYPE, (not in use) */
234 IB_DEVICE_PORT_ACTIVE_EVENT = IB_UVERBS_DEVICE_PORT_ACTIVE_EVENT,
235 IB_DEVICE_SYS_IMAGE_GUID = IB_UVERBS_DEVICE_SYS_IMAGE_GUID,
236 IB_DEVICE_RC_RNR_NAK_GEN = IB_UVERBS_DEVICE_RC_RNR_NAK_GEN,
237 IB_DEVICE_SRQ_RESIZE = IB_UVERBS_DEVICE_SRQ_RESIZE,
238 IB_DEVICE_N_NOTIFY_CQ = IB_UVERBS_DEVICE_N_NOTIFY_CQ,
239
240 /* Reserved, old SEND_W_INV = 1 << 16,*/
241 IB_DEVICE_MEM_WINDOW = IB_UVERBS_DEVICE_MEM_WINDOW,
242 /*
243 * Devices should set IB_DEVICE_UD_IP_SUM if they support
244 * insertion of UDP and TCP checksum on outgoing UD IPoIB
245 * messages and can verify the validity of checksum for
246 * incoming messages. Setting this flag implies that the
247 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
248 */
249 IB_DEVICE_UD_IP_CSUM = IB_UVERBS_DEVICE_UD_IP_CSUM,
250 IB_DEVICE_XRC = IB_UVERBS_DEVICE_XRC,
251
252 /*
253 * This device supports the IB "base memory management extension",
254 * which includes support for fast registrations (IB_WR_REG_MR,
255 * IB_WR_LOCAL_INV and IB_WR_SEND_WITH_INV verbs). This flag should
256 * also be set by any iWarp device which must support FRs to comply
257 * with the iWarp verbs spec. iWarp devices also support the
258 * IB_WR_RDMA_READ_WITH_INV verb for RDMA READs that invalidate the
259 * stag.
260 */
261 IB_DEVICE_MEM_MGT_EXTENSIONS = IB_UVERBS_DEVICE_MEM_MGT_EXTENSIONS,
262 IB_DEVICE_MEM_WINDOW_TYPE_2A = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2A,
263 IB_DEVICE_MEM_WINDOW_TYPE_2B = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2B,
264 IB_DEVICE_RC_IP_CSUM = IB_UVERBS_DEVICE_RC_IP_CSUM,
265 /* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */
266 IB_DEVICE_RAW_IP_CSUM = IB_UVERBS_DEVICE_RAW_IP_CSUM,
267 IB_DEVICE_MANAGED_FLOW_STEERING =
268 IB_UVERBS_DEVICE_MANAGED_FLOW_STEERING,
269 /* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
270 IB_DEVICE_RAW_SCATTER_FCS = IB_UVERBS_DEVICE_RAW_SCATTER_FCS,
271 /* The device supports padding incoming writes to cacheline. */
272 IB_DEVICE_PCI_WRITE_END_PADDING =
273 IB_UVERBS_DEVICE_PCI_WRITE_END_PADDING,
274 /* Placement type attributes */
275 IB_DEVICE_FLUSH_GLOBAL = IB_UVERBS_DEVICE_FLUSH_GLOBAL,
276 IB_DEVICE_FLUSH_PERSISTENT = IB_UVERBS_DEVICE_FLUSH_PERSISTENT,
277 IB_DEVICE_ATOMIC_WRITE = IB_UVERBS_DEVICE_ATOMIC_WRITE,
278};
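
/*
 * A minimal sketch (not part of this API) of how a ULP might test one of the
 * capability bits above before relying on fast registration; it assumes the
 * attrs member carried by struct ib_device, which is declared later in this
 * header:
 *
 *	static bool example_supports_frwr(struct ib_device *dev)
 *	{
 *		return dev->attrs.device_cap_flags &
 *		       IB_DEVICE_MEM_MGT_EXTENSIONS;
 *	}
 */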
279
280enum ib_kernel_cap_flags {
281 /*
282 * This device supports a per-device lkey or stag that can be
283 * used without performing a memory registration for the local
284 * memory. Note that ULPs should never check this flag, but
285 * should instead use the local_dma_lkey flag in the ib_pd structure,
286 * which will always contain a usable lkey.
287 */
288 IBK_LOCAL_DMA_LKEY = 1 << 0,
289 /* IB_QP_CREATE_INTEGRITY_EN is supported to implement T10-PI */
290 IBK_INTEGRITY_HANDOVER = 1 << 1,
291 /* IB_ACCESS_ON_DEMAND is supported during reg_user_mr() */
292 IBK_ON_DEMAND_PAGING = 1 << 2,
293 /* IB_MR_TYPE_SG_GAPS is supported */
294 IBK_SG_GAPS_REG = 1 << 3,
295 /* Driver supports RDMA_NLDEV_CMD_DELLINK */
296 IBK_ALLOW_USER_UNREG = 1 << 4,
297
298 /* ipoib will use IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK */
299 IBK_BLOCK_MULTICAST_LOOPBACK = 1 << 5,
300 /* ipoib will use IB_QP_CREATE_IPOIB_UD_LSO for its QPs */
301 IBK_UD_TSO = 1 << 6,
302 /* ipoib will use the device ops:
303 * get_vf_config
304 * get_vf_guid
305 * get_vf_stats
306 * set_vf_guid
307 * set_vf_link_state
308 */
309 IBK_VIRTUAL_FUNCTION = 1 << 7,
310 /* ipoib will use IB_QP_CREATE_NETDEV_USE for its QPs */
311 IBK_RDMA_NETDEV_OPA = 1 << 8,
312};
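
/*
 * A minimal sketch of the IBK_LOCAL_DMA_LKEY guidance above: rather than
 * testing the flag, a ULP takes the lkey straight from its ib_pd (defined
 * later in this header); dma_addr and len are hypothetical values:
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = pd->local_dma_lkey,
 *	};
 */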
313
314enum ib_atomic_cap {
315 IB_ATOMIC_NONE,
316 IB_ATOMIC_HCA,
317 IB_ATOMIC_GLOB
318};
319
320enum ib_odp_general_cap_bits {
321 IB_ODP_SUPPORT = IB_UVERBS_ODP_SUPPORT,
322 IB_ODP_SUPPORT_IMPLICIT = IB_UVERBS_ODP_SUPPORT_IMPLICIT,
323};
324
325enum ib_odp_transport_cap_bits {
326 IB_ODP_SUPPORT_SEND = IB_UVERBS_ODP_SUPPORT_SEND,
327 IB_ODP_SUPPORT_RECV = IB_UVERBS_ODP_SUPPORT_RECV,
328 IB_ODP_SUPPORT_WRITE = IB_UVERBS_ODP_SUPPORT_WRITE,
329 IB_ODP_SUPPORT_READ = IB_UVERBS_ODP_SUPPORT_READ,
330 IB_ODP_SUPPORT_ATOMIC = IB_UVERBS_ODP_SUPPORT_ATOMIC,
331 IB_ODP_SUPPORT_SRQ_RECV = IB_UVERBS_ODP_SUPPORT_SRQ_RECV,
332 IB_ODP_SUPPORT_FLUSH = IB_UVERBS_ODP_SUPPORT_FLUSH,
333 IB_ODP_SUPPORT_ATOMIC_WRITE = IB_UVERBS_ODP_SUPPORT_ATOMIC_WRITE,
334};
335
336struct ib_odp_caps {
337 uint64_t general_caps;
338 struct {
339 uint32_t rc_odp_caps;
340 uint32_t uc_odp_caps;
341 uint32_t ud_odp_caps;
342 uint32_t xrc_odp_caps;
343 } per_transport_caps;
344};
345
346struct ib_rss_caps {
347 /* Corresponding bit will be set if qp type from
348 * 'enum ib_qp_type' is supported, e.g.
349 * supported_qpts |= 1 << IB_QPT_UD
350 */
351 u32 supported_qpts;
352 u32 max_rwq_indirection_tables;
353 u32 max_rwq_indirection_table_size;
354};
355
356enum ib_tm_cap_flags {
357 /* Support tag matching with rendezvous offload for RC transport */
358 IB_TM_CAP_RNDV_RC = 1 << 0,
359};
360
361struct ib_tm_caps {
362 /* Max size of RNDV header */
363 u32 max_rndv_hdr_size;
364 /* Max number of entries in tag matching list */
365 u32 max_num_tags;
366 /* From enum ib_tm_cap_flags */
367 u32 flags;
368 /* Max number of outstanding list operations */
369 u32 max_ops;
370 /* Max number of SGE in tag matching entry */
371 u32 max_sge;
372};
373
374struct ib_cq_init_attr {
375 unsigned int cqe;
376 u32 comp_vector;
377 u32 flags;
378};
379
380enum ib_cq_attr_mask {
381 IB_CQ_MODERATE = 1 << 0,
382};
383
384struct ib_cq_caps {
385 u16 max_cq_moderation_count;
386 u16 max_cq_moderation_period;
387};
388
389struct ib_dm_mr_attr {
390 u64 length;
391 u64 offset;
392 u32 access_flags;
393};
394
395struct ib_dm_alloc_attr {
396 u64 length;
397 u32 alignment;
398 u32 flags;
399};
400
401struct ib_device_attr {
402 u64 fw_ver;
403 __be64 sys_image_guid;
404 u64 max_mr_size;
405 u64 page_size_cap;
406 u32 vendor_id;
407 u32 vendor_part_id;
408 u32 hw_ver;
409 int max_qp;
410 int max_qp_wr;
411 u64 device_cap_flags;
412 u64 kernel_cap_flags;
413 int max_send_sge;
414 int max_recv_sge;
415 int max_sge_rd;
416 int max_cq;
417 int max_cqe;
418 int max_mr;
419 int max_pd;
420 int max_qp_rd_atom;
421 int max_ee_rd_atom;
422 int max_res_rd_atom;
423 int max_qp_init_rd_atom;
424 int max_ee_init_rd_atom;
425 enum ib_atomic_cap atomic_cap;
426 enum ib_atomic_cap masked_atomic_cap;
427 int max_ee;
428 int max_rdd;
429 int max_mw;
430 int max_raw_ipv6_qp;
431 int max_raw_ethy_qp;
432 int max_mcast_grp;
433 int max_mcast_qp_attach;
434 int max_total_mcast_qp_attach;
435 int max_ah;
436 int max_srq;
437 int max_srq_wr;
438 int max_srq_sge;
439 unsigned int max_fast_reg_page_list_len;
440 unsigned int max_pi_fast_reg_page_list_len;
441 u16 max_pkeys;
442 u8 local_ca_ack_delay;
443 int sig_prot_cap;
444 int sig_guard_cap;
445 struct ib_odp_caps odp_caps;
446 uint64_t timestamp_mask;
447 uint64_t hca_core_clock; /* in kHz */
448 struct ib_rss_caps rss_caps;
449 u32 max_wq_type_rq;
450 u32 raw_packet_caps; /* Use ib_raw_packet_caps enum */
451 struct ib_tm_caps tm_caps;
452 struct ib_cq_caps cq_caps;
453 u64 max_dm_size;
454 /* Max entries for sgl for optimized performance per READ */
455 u32 max_sgl_rd;
456};
457
458enum ib_mtu {
459 IB_MTU_256 = 1,
460 IB_MTU_512 = 2,
461 IB_MTU_1024 = 3,
462 IB_MTU_2048 = 4,
463 IB_MTU_4096 = 5
464};
465
466enum opa_mtu {
467 OPA_MTU_8192 = 6,
468 OPA_MTU_10240 = 7
469};
470
471static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
472{
473 switch (mtu) {
474 case IB_MTU_256: return 256;
475 case IB_MTU_512: return 512;
476 case IB_MTU_1024: return 1024;
477 case IB_MTU_2048: return 2048;
478 case IB_MTU_4096: return 4096;
479 default: return -1;
480 }
481}
482
483static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
484{
485 if (mtu >= 4096)
486 return IB_MTU_4096;
487 else if (mtu >= 2048)
488 return IB_MTU_2048;
489 else if (mtu >= 1024)
490 return IB_MTU_1024;
491 else if (mtu >= 512)
492 return IB_MTU_512;
493 else
494 return IB_MTU_256;
495}
496
497static inline int opa_mtu_enum_to_int(enum opa_mtu mtu)
498{
499 switch (mtu) {
500 case OPA_MTU_8192:
501 return 8192;
502 case OPA_MTU_10240:
503 return 10240;
504 default:
505 return(ib_mtu_enum_to_int((enum ib_mtu)mtu));
506 }
507}
508
509static inline enum opa_mtu opa_mtu_int_to_enum(int mtu)
510{
511 if (mtu >= 10240)
512 return OPA_MTU_10240;
513 else if (mtu >= 8192)
514 return OPA_MTU_8192;
515 else
516 return ((enum opa_mtu)ib_mtu_int_to_enum(mtu));
517}
518
519enum ib_port_state {
520 IB_PORT_NOP = 0,
521 IB_PORT_DOWN = 1,
522 IB_PORT_INIT = 2,
523 IB_PORT_ARMED = 3,
524 IB_PORT_ACTIVE = 4,
525 IB_PORT_ACTIVE_DEFER = 5
526};
527
528static inline const char *__attribute_const__
529ib_port_state_to_str(enum ib_port_state state)
530{
531 const char * const states[] = {
532 [IB_PORT_NOP] = "NOP",
533 [IB_PORT_DOWN] = "DOWN",
534 [IB_PORT_INIT] = "INIT",
535 [IB_PORT_ARMED] = "ARMED",
536 [IB_PORT_ACTIVE] = "ACTIVE",
537 [IB_PORT_ACTIVE_DEFER] = "ACTIVE_DEFER",
538 };
539
540 if (state < ARRAY_SIZE(states))
541 return states[state];
542 return "UNKNOWN";
543}
544
545enum ib_port_phys_state {
546 IB_PORT_PHYS_STATE_SLEEP = 1,
547 IB_PORT_PHYS_STATE_POLLING = 2,
548 IB_PORT_PHYS_STATE_DISABLED = 3,
549 IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING = 4,
550 IB_PORT_PHYS_STATE_LINK_UP = 5,
551 IB_PORT_PHYS_STATE_LINK_ERROR_RECOVERY = 6,
552 IB_PORT_PHYS_STATE_PHY_TEST = 7,
553};
554
555enum ib_port_width {
556 IB_WIDTH_1X = 1,
557 IB_WIDTH_2X = 16,
558 IB_WIDTH_4X = 2,
559 IB_WIDTH_8X = 4,
560 IB_WIDTH_12X = 8
561};
562
563static inline int ib_width_enum_to_int(enum ib_port_width width)
564{
565 switch (width) {
566 case IB_WIDTH_1X: return 1;
567 case IB_WIDTH_2X: return 2;
568 case IB_WIDTH_4X: return 4;
569 case IB_WIDTH_8X: return 8;
570 case IB_WIDTH_12X: return 12;
571 default: return -1;
572 }
573}
574
575enum ib_port_speed {
576 IB_SPEED_SDR = 1,
577 IB_SPEED_DDR = 2,
578 IB_SPEED_QDR = 4,
579 IB_SPEED_FDR10 = 8,
580 IB_SPEED_FDR = 16,
581 IB_SPEED_EDR = 32,
582 IB_SPEED_HDR = 64,
583 IB_SPEED_NDR = 128,
584 IB_SPEED_XDR = 256,
585};
586
587enum ib_stat_flag {
588 IB_STAT_FLAG_OPTIONAL = 1 << 0,
589};
590
591/**
592 * struct rdma_stat_desc - description of one rdma stat/counter
593 * @name: The name of the counter
594 * @flags: Flags of the counter; For example, IB_STAT_FLAG_OPTIONAL
595 * @priv: Driver private information; Core code should not use
596 */
597struct rdma_stat_desc {
598 const char *name;
599 unsigned int flags;
600 const void *priv;
601};
602
603/**
604 * struct rdma_hw_stats - collection of hardware stats and their management
605 * @lock: Mutex to protect parallel write access to lifespan and values
606 * of counters, which are 64 bits and not guaranteed to be written
607 * atomically on 32-bit systems.
608 * @timestamp: Used by the core code to track when the last update was
609 * @lifespan: Used by the core code to determine how old the counters
610 * should be before being updated again. Stored in jiffies, defaults
611 * to 10 milliseconds; drivers can override the default by specifying
612 * their own value during their allocation routine.
613 * @descs: Array of pointers to static descriptors used for the counters
614 * in directory.
615 * @is_disabled: A bitmap to indicate each counter is currently disabled
616 * or not.
617 * @num_counters: How many hardware counters there are. If @descs is
618 * shorter than this number, a kernel oops will result. Driver authors
619 * are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(descs) < num_counters)
620 * in their code to prevent this.
621 * @value: Array of u64 counters that are accessed by the sysfs code and
622 * filled in by the drivers get_stats routine
623 */
624struct rdma_hw_stats {
625 struct mutex lock; /* Protect lifespan and values[] */
626 unsigned long timestamp;
627 unsigned long lifespan;
628 const struct rdma_stat_desc *descs;
629 unsigned long *is_disabled;
630 int num_counters;
631 u64 value[] __counted_by(num_counters);
632};
633
634#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
635
636struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
637 const struct rdma_stat_desc *descs, int num_counters,
638 unsigned long lifespan);
639
640void rdma_free_hw_stats_struct(struct rdma_hw_stats *stats);
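
/*
 * A minimal sketch of a driver building its counter descriptors and
 * allocating the stats structure with the helpers above; the counter names
 * and the example_descs array are hypothetical:
 *
 *	static const struct rdma_stat_desc example_descs[] = {
 *		[0] = { .name = "rx_packets" },
 *		[1] = { .name = "tx_packets", .flags = IB_STAT_FLAG_OPTIONAL },
 *	};
 *
 *	stats = rdma_alloc_hw_stats_struct(example_descs,
 *					   ARRAY_SIZE(example_descs),
 *					   RDMA_HW_STATS_DEFAULT_LIFESPAN);
 */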
641
642/* Define bits for the various functionality this port needs to be supported by
643 * the core.
644 */
645/* Management 0x00000FFF */
646#define RDMA_CORE_CAP_IB_MAD 0x00000001
647#define RDMA_CORE_CAP_IB_SMI 0x00000002
648#define RDMA_CORE_CAP_IB_CM 0x00000004
649#define RDMA_CORE_CAP_IW_CM 0x00000008
650#define RDMA_CORE_CAP_IB_SA 0x00000010
651#define RDMA_CORE_CAP_OPA_MAD 0x00000020
652
653/* Address format 0x000FF000 */
654#define RDMA_CORE_CAP_AF_IB 0x00001000
655#define RDMA_CORE_CAP_ETH_AH 0x00002000
656#define RDMA_CORE_CAP_OPA_AH 0x00004000
657#define RDMA_CORE_CAP_IB_GRH_REQUIRED 0x00008000
658
659/* Protocol 0xFFF00000 */
660#define RDMA_CORE_CAP_PROT_IB 0x00100000
661#define RDMA_CORE_CAP_PROT_ROCE 0x00200000
662#define RDMA_CORE_CAP_PROT_IWARP 0x00400000
663#define RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP 0x00800000
664#define RDMA_CORE_CAP_PROT_RAW_PACKET 0x01000000
665#define RDMA_CORE_CAP_PROT_USNIC 0x02000000
666
667#define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
668 | RDMA_CORE_CAP_PROT_ROCE \
669 | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)
670
671#define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \
672 | RDMA_CORE_CAP_IB_MAD \
673 | RDMA_CORE_CAP_IB_SMI \
674 | RDMA_CORE_CAP_IB_CM \
675 | RDMA_CORE_CAP_IB_SA \
676 | RDMA_CORE_CAP_AF_IB)
677#define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \
678 | RDMA_CORE_CAP_IB_MAD \
679 | RDMA_CORE_CAP_IB_CM \
680 | RDMA_CORE_CAP_AF_IB \
681 | RDMA_CORE_CAP_ETH_AH)
682#define RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP \
683 (RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP \
684 | RDMA_CORE_CAP_IB_MAD \
685 | RDMA_CORE_CAP_IB_CM \
686 | RDMA_CORE_CAP_AF_IB \
687 | RDMA_CORE_CAP_ETH_AH)
688#define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \
689 | RDMA_CORE_CAP_IW_CM)
690#define RDMA_CORE_PORT_INTEL_OPA (RDMA_CORE_PORT_IBA_IB \
691 | RDMA_CORE_CAP_OPA_MAD)
692
693#define RDMA_CORE_PORT_RAW_PACKET (RDMA_CORE_CAP_PROT_RAW_PACKET)
694
695#define RDMA_CORE_PORT_USNIC (RDMA_CORE_CAP_PROT_USNIC)
696
697struct ib_port_attr {
698 u64 subnet_prefix;
699 enum ib_port_state state;
700 enum ib_mtu max_mtu;
701 enum ib_mtu active_mtu;
702 u32 phys_mtu;
703 int gid_tbl_len;
704 unsigned int ip_gids:1;
705 /* This is the value from PortInfo CapabilityMask, defined by IBA */
706 u32 port_cap_flags;
707 u32 max_msg_sz;
708 u32 bad_pkey_cntr;
709 u32 qkey_viol_cntr;
710 u16 pkey_tbl_len;
711 u32 sm_lid;
712 u32 lid;
713 u8 lmc;
714 u8 max_vl_num;
715 u8 sm_sl;
716 u8 subnet_timeout;
717 u8 init_type_reply;
718 u8 active_width;
719 u16 active_speed;
720 u8 phys_state;
721 u16 port_cap_flags2;
722};
723
724enum ib_device_modify_flags {
725 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
726 IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
727};
728
729#define IB_DEVICE_NODE_DESC_MAX 64
730
731struct ib_device_modify {
732 u64 sys_image_guid;
733 char node_desc[IB_DEVICE_NODE_DESC_MAX];
734};
735
736enum ib_port_modify_flags {
737 IB_PORT_SHUTDOWN = 1,
738 IB_PORT_INIT_TYPE = (1<<2),
739 IB_PORT_RESET_QKEY_CNTR = (1<<3),
740 IB_PORT_OPA_MASK_CHG = (1<<4)
741};
742
743struct ib_port_modify {
744 u32 set_port_cap_mask;
745 u32 clr_port_cap_mask;
746 u8 init_type;
747};
748
749enum ib_event_type {
750 IB_EVENT_CQ_ERR,
751 IB_EVENT_QP_FATAL,
752 IB_EVENT_QP_REQ_ERR,
753 IB_EVENT_QP_ACCESS_ERR,
754 IB_EVENT_COMM_EST,
755 IB_EVENT_SQ_DRAINED,
756 IB_EVENT_PATH_MIG,
757 IB_EVENT_PATH_MIG_ERR,
758 IB_EVENT_DEVICE_FATAL,
759 IB_EVENT_PORT_ACTIVE,
760 IB_EVENT_PORT_ERR,
761 IB_EVENT_LID_CHANGE,
762 IB_EVENT_PKEY_CHANGE,
763 IB_EVENT_SM_CHANGE,
764 IB_EVENT_SRQ_ERR,
765 IB_EVENT_SRQ_LIMIT_REACHED,
766 IB_EVENT_QP_LAST_WQE_REACHED,
767 IB_EVENT_CLIENT_REREGISTER,
768 IB_EVENT_GID_CHANGE,
769 IB_EVENT_WQ_FATAL,
770 IB_EVENT_DEVICE_SPEED_CHANGE,
771};
772
773const char *__attribute_const__ ib_event_msg(enum ib_event_type event);
774
775struct ib_event {
776 struct ib_device *device;
777 union {
778 struct ib_cq *cq;
779 struct ib_qp *qp;
780 struct ib_srq *srq;
781 struct ib_wq *wq;
782 u32 port_num;
783 } element;
784 enum ib_event_type event;
785};
786
787struct ib_event_handler {
788 struct ib_device *device;
789 void (*handler)(struct ib_event_handler *, struct ib_event *);
790 struct list_head list;
791};
792
793#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \
794 do { \
795 (_ptr)->device = _device; \
796 (_ptr)->handler = _handler; \
797 INIT_LIST_HEAD(&(_ptr)->list); \
798 } while (0)
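
/*
 * A minimal sketch of wiring up an asynchronous event handler with the macro
 * above; the callback and handler names are hypothetical, and registration
 * uses ib_register_event_handler(), declared further down in this header:
 *
 *	static struct ib_event_handler example_handler;
 *
 *	static void example_event_cb(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		pr_info("async event: %s\n", ib_event_msg(event->event));
 *	}
 *
 *	INIT_IB_EVENT_HANDLER(&example_handler, device, example_event_cb);
 *	ib_register_event_handler(&example_handler);
 */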
799
800struct ib_global_route {
801 const struct ib_gid_attr *sgid_attr;
802 union ib_gid dgid;
803 u32 flow_label;
804 u8 sgid_index;
805 u8 hop_limit;
806 u8 traffic_class;
807};
808
809struct ib_grh {
810 __be32 version_tclass_flow;
811 __be16 paylen;
812 u8 next_hdr;
813 u8 hop_limit;
814 union ib_gid sgid;
815 union ib_gid dgid;
816};
817
818union rdma_network_hdr {
819 struct ib_grh ibgrh;
820 struct {
821 /* The IB spec states that if it's IPv4, the IPv4 header
822 * is located in the last 20 bytes of the GRH.
823 */
824 u8 reserved[20];
825 struct iphdr roce4grh;
826 };
827};
828
829#define IB_QPN_MASK 0xFFFFFF
830
831enum {
832 IB_MULTICAST_QPN = 0xffffff
833};
834
835#define IB_LID_PERMISSIVE cpu_to_be16(0xFFFF)
836#define IB_MULTICAST_LID_BASE cpu_to_be16(0xC000)
837
838enum ib_ah_flags {
839 IB_AH_GRH = 1
840};
841
842enum ib_rate {
843 IB_RATE_PORT_CURRENT = 0,
844 IB_RATE_2_5_GBPS = 2,
845 IB_RATE_5_GBPS = 5,
846 IB_RATE_10_GBPS = 3,
847 IB_RATE_20_GBPS = 6,
848 IB_RATE_30_GBPS = 4,
849 IB_RATE_40_GBPS = 7,
850 IB_RATE_60_GBPS = 8,
851 IB_RATE_80_GBPS = 9,
852 IB_RATE_120_GBPS = 10,
853 IB_RATE_14_GBPS = 11,
854 IB_RATE_56_GBPS = 12,
855 IB_RATE_112_GBPS = 13,
856 IB_RATE_168_GBPS = 14,
857 IB_RATE_25_GBPS = 15,
858 IB_RATE_100_GBPS = 16,
859 IB_RATE_200_GBPS = 17,
860 IB_RATE_300_GBPS = 18,
861 IB_RATE_28_GBPS = 19,
862 IB_RATE_50_GBPS = 20,
863 IB_RATE_400_GBPS = 21,
864 IB_RATE_600_GBPS = 22,
865 IB_RATE_800_GBPS = 23,
866 IB_RATE_1600_GBPS = 25,
867};
868
869/**
870 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
871 * base rate of 2.5 Gbit/sec. For example, IB_RATE_5_GBPS will be
872 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
873 * @rate: rate to convert.
874 */
875__attribute_const__ int ib_rate_to_mult(enum ib_rate rate);
876
877/**
878 * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
879 * For example, IB_RATE_2_5_GBPS will be converted to 2500.
880 * @rate: rate to convert.
881 */
882__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
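
/*
 * Worked example for the two conversions above: for IB_RATE_10_GBPS,
 * ib_rate_to_mult() returns 4 (10 Gbit/sec = 4 * 2.5 Gbit/sec) and
 * ib_rate_to_mbps() returns 10000.
 */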
883
884struct ib_port_speed_info {
885 const char *str;
886 int rate; /* in deci-Gb/sec (100 Mb/s units) */
887};
888
889/**
890 * ib_port_attr_to_speed_info - Convert port attributes to speed information
891 * @attr: Port attributes containing active_speed and active_width
892 * @speed_info: Speed information to return
893 *
894 * Returns 0 on success, -EINVAL on error.
895 */
896int ib_port_attr_to_speed_info(struct ib_port_attr *attr,
897 struct ib_port_speed_info *speed_info);
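
/*
 * A minimal usage sketch; attr is assumed to have been filled in first, for
 * example by ib_query_port(), declared further down in this header:
 *
 *	struct ib_port_speed_info info;
 *
 *	if (!ib_port_attr_to_speed_info(&attr, &info))
 *		pr_info("link: %s (%d deci-Gb/sec)\n", info.str, info.rate);
 */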
898
899/**
900 * enum ib_mr_type - memory region type
901 * @IB_MR_TYPE_MEM_REG: memory region that is used for
902 * normal registration
903 * @IB_MR_TYPE_SG_GAPS: memory region that is capable to
904 * register any arbitrary sg lists (without
905 * the normal mr constraints - see
906 * ib_map_mr_sg)
907 * @IB_MR_TYPE_DM: memory region that is used for device
908 * memory registration
909 * @IB_MR_TYPE_USER: memory region that is used for the user-space
910 * application
911 * @IB_MR_TYPE_DMA: memory region that is used for DMA operations
912 * without address translations (VA=PA)
913 * @IB_MR_TYPE_INTEGRITY: memory region that is used for
914 * data integrity operations
915 */
916enum ib_mr_type {
917 IB_MR_TYPE_MEM_REG,
918 IB_MR_TYPE_SG_GAPS,
919 IB_MR_TYPE_DM,
920 IB_MR_TYPE_USER,
921 IB_MR_TYPE_DMA,
922 IB_MR_TYPE_INTEGRITY,
923};
924
925enum ib_mr_status_check {
926 IB_MR_CHECK_SIG_STATUS = 1,
927};
928
929/**
930 * struct ib_mr_status - Memory region status container
931 *
932 * @fail_status: Bitmask of MR checks status. For each
933 * failed check a corresponding status bit is set.
934 * @sig_err: Additional info for IB_MR_CHECK_SIG_STATUS
935 * failure.
936 */
937struct ib_mr_status {
938 u32 fail_status;
939 struct ib_sig_err sig_err;
940};
941
942/**
943 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
944 * enum.
945 * @mult: multiple to convert.
946 */
947__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
948
949struct rdma_ah_init_attr {
950 struct rdma_ah_attr *ah_attr;
951 u32 flags;
952 struct net_device *xmit_slave;
953};
954
955enum rdma_ah_attr_type {
956 RDMA_AH_ATTR_TYPE_UNDEFINED,
957 RDMA_AH_ATTR_TYPE_IB,
958 RDMA_AH_ATTR_TYPE_ROCE,
959 RDMA_AH_ATTR_TYPE_OPA,
960};
961
962struct ib_ah_attr {
963 u16 dlid;
964 u8 src_path_bits;
965};
966
967struct roce_ah_attr {
968 u8 dmac[ETH_ALEN];
969};
970
971struct opa_ah_attr {
972 u32 dlid;
973 u8 src_path_bits;
974 bool make_grd;
975};
976
977struct rdma_ah_attr {
978 struct ib_global_route grh;
979 u8 sl;
980 u8 static_rate;
981 u32 port_num;
982 u8 ah_flags;
983 enum rdma_ah_attr_type type;
984 union {
985 struct ib_ah_attr ib;
986 struct roce_ah_attr roce;
987 struct opa_ah_attr opa;
988 };
989};
990
991enum ib_wc_status {
992 IB_WC_SUCCESS,
993 IB_WC_LOC_LEN_ERR,
994 IB_WC_LOC_QP_OP_ERR,
995 IB_WC_LOC_EEC_OP_ERR,
996 IB_WC_LOC_PROT_ERR,
997 IB_WC_WR_FLUSH_ERR,
998 IB_WC_MW_BIND_ERR,
999 IB_WC_BAD_RESP_ERR,
1000 IB_WC_LOC_ACCESS_ERR,
1001 IB_WC_REM_INV_REQ_ERR,
1002 IB_WC_REM_ACCESS_ERR,
1003 IB_WC_REM_OP_ERR,
1004 IB_WC_RETRY_EXC_ERR,
1005 IB_WC_RNR_RETRY_EXC_ERR,
1006 IB_WC_LOC_RDD_VIOL_ERR,
1007 IB_WC_REM_INV_RD_REQ_ERR,
1008 IB_WC_REM_ABORT_ERR,
1009 IB_WC_INV_EECN_ERR,
1010 IB_WC_INV_EEC_STATE_ERR,
1011 IB_WC_FATAL_ERR,
1012 IB_WC_RESP_TIMEOUT_ERR,
1013 IB_WC_GENERAL_ERR
1014};
1015
1016const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status);
1017
1018enum ib_wc_opcode {
1019 IB_WC_SEND = IB_UVERBS_WC_SEND,
1020 IB_WC_RDMA_WRITE = IB_UVERBS_WC_RDMA_WRITE,
1021 IB_WC_RDMA_READ = IB_UVERBS_WC_RDMA_READ,
1022 IB_WC_COMP_SWAP = IB_UVERBS_WC_COMP_SWAP,
1023 IB_WC_FETCH_ADD = IB_UVERBS_WC_FETCH_ADD,
1024 IB_WC_BIND_MW = IB_UVERBS_WC_BIND_MW,
1025 IB_WC_LOCAL_INV = IB_UVERBS_WC_LOCAL_INV,
1026 IB_WC_LSO = IB_UVERBS_WC_TSO,
1027 IB_WC_ATOMIC_WRITE = IB_UVERBS_WC_ATOMIC_WRITE,
1028 IB_WC_REG_MR,
1029 IB_WC_MASKED_COMP_SWAP,
1030 IB_WC_MASKED_FETCH_ADD,
1031 IB_WC_FLUSH = IB_UVERBS_WC_FLUSH,
1032/*
1033 * Set value of IB_WC_RECV so consumers can test if a completion is a
1034 * receive by testing (opcode & IB_WC_RECV).
1035 */
1036 IB_WC_RECV = 1 << 7,
1037 IB_WC_RECV_RDMA_WITH_IMM
1038};
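
/*
 * A minimal sketch of the IB_WC_RECV convention described above, using the
 * opcode field of struct ib_wc (defined below); the two helpers are
 * hypothetical:
 *
 *	if (wc->opcode & IB_WC_RECV)
 *		example_handle_recv(wc);
 *	else
 *		example_handle_send(wc);
 */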
1039
1040enum ib_wc_flags {
1041 IB_WC_GRH = 1,
1042 IB_WC_WITH_IMM = (1<<1),
1043 IB_WC_WITH_INVALIDATE = (1<<2),
1044 IB_WC_IP_CSUM_OK = (1<<3),
1045 IB_WC_WITH_SMAC = (1<<4),
1046 IB_WC_WITH_VLAN = (1<<5),
1047 IB_WC_WITH_NETWORK_HDR_TYPE = (1<<6),
1048};
1049
1050struct ib_wc {
1051 union {
1052 u64 wr_id;
1053 struct ib_cqe *wr_cqe;
1054 };
1055 enum ib_wc_status status;
1056 enum ib_wc_opcode opcode;
1057 u32 vendor_err;
1058 u32 byte_len;
1059 struct ib_qp *qp;
1060 union {
1061 __be32 imm_data;
1062 u32 invalidate_rkey;
1063 } ex;
1064 u32 src_qp;
1065 u32 slid;
1066 int wc_flags;
1067 u16 pkey_index;
1068 u8 sl;
1069 u8 dlid_path_bits;
1070 u32 port_num; /* valid only for DR SMPs on switches */
1071 u8 smac[ETH_ALEN];
1072 u16 vlan_id;
1073 u8 network_hdr_type;
1074};
1075
1076enum ib_cq_notify_flags {
1077 IB_CQ_SOLICITED = 1 << 0,
1078 IB_CQ_NEXT_COMP = 1 << 1,
1079 IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
1080 IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
1081};
1082
1083enum ib_srq_type {
1084 IB_SRQT_BASIC = IB_UVERBS_SRQT_BASIC,
1085 IB_SRQT_XRC = IB_UVERBS_SRQT_XRC,
1086 IB_SRQT_TM = IB_UVERBS_SRQT_TM,
1087};
1088
1089static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
1090{
1091 return srq_type == IB_SRQT_XRC ||
1092 srq_type == IB_SRQT_TM;
1093}
1094
1095enum ib_srq_attr_mask {
1096 IB_SRQ_MAX_WR = 1 << 0,
1097 IB_SRQ_LIMIT = 1 << 1,
1098};
1099
1100struct ib_srq_attr {
1101 u32 max_wr;
1102 u32 max_sge;
1103 u32 srq_limit;
1104};
1105
1106struct ib_srq_init_attr {
1107 void (*event_handler)(struct ib_event *, void *);
1108 void *srq_context;
1109 struct ib_srq_attr attr;
1110 enum ib_srq_type srq_type;
1111
1112 struct {
1113 struct ib_cq *cq;
1114 union {
1115 struct {
1116 struct ib_xrcd *xrcd;
1117 } xrc;
1118
1119 struct {
1120 u32 max_num_tags;
1121 } tag_matching;
1122 };
1123 } ext;
1124};
1125
1126struct ib_qp_cap {
1127 u32 max_send_wr;
1128 u32 max_recv_wr;
1129 u32 max_send_sge;
1130 u32 max_recv_sge;
1131 u32 max_inline_data;
1132
1133 /*
1134 * Maximum number of rdma_rw_ctx structures in flight at a time.
1135 * ib_create_qp() will calculate the right amount of needed WRs
1136 * and MRs based on this.
1137 */
1138 u32 max_rdma_ctxs;
1139};
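
/*
 * A minimal sketch of sizing a QP for the RW API through max_rdma_ctxs,
 * which bounds the number of rdma_rw_ctx structures in flight as described
 * above; the numbers are hypothetical:
 *
 *	struct ib_qp_cap cap = {
 *		.max_send_wr   = 128,
 *		.max_recv_wr   = 128,
 *		.max_send_sge  = 2,
 *		.max_recv_sge  = 2,
 *		.max_rdma_ctxs = 16,
 *	};
 */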
1140
1141enum ib_sig_type {
1142 IB_SIGNAL_ALL_WR,
1143 IB_SIGNAL_REQ_WR
1144};
1145
1146enum ib_qp_type {
1147 /*
1148 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
1149 * here (and in that order) since the MAD layer uses them as
1150 * indices into a 2-entry table.
1151 */
1152 IB_QPT_SMI,
1153 IB_QPT_GSI,
1154
1155 IB_QPT_RC = IB_UVERBS_QPT_RC,
1156 IB_QPT_UC = IB_UVERBS_QPT_UC,
1157 IB_QPT_UD = IB_UVERBS_QPT_UD,
1158 IB_QPT_RAW_IPV6,
1159 IB_QPT_RAW_ETHERTYPE,
1160 IB_QPT_RAW_PACKET = IB_UVERBS_QPT_RAW_PACKET,
1161 IB_QPT_XRC_INI = IB_UVERBS_QPT_XRC_INI,
1162 IB_QPT_XRC_TGT = IB_UVERBS_QPT_XRC_TGT,
1163 IB_QPT_MAX,
1164 IB_QPT_DRIVER = IB_UVERBS_QPT_DRIVER,
1165 /* Reserve a range for qp types internal to the low level driver.
1166 * These qp types will not be visible at the IB core layer, so the
1167 * IB_QPT_MAX usages should not be affected in the core layer
1168 */
1169 IB_QPT_RESERVED1 = 0x1000,
1170 IB_QPT_RESERVED2,
1171 IB_QPT_RESERVED3,
1172 IB_QPT_RESERVED4,
1173 IB_QPT_RESERVED5,
1174 IB_QPT_RESERVED6,
1175 IB_QPT_RESERVED7,
1176 IB_QPT_RESERVED8,
1177 IB_QPT_RESERVED9,
1178 IB_QPT_RESERVED10,
1179};
1180
1181enum ib_qp_create_flags {
1182 IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
1183 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK =
1184 IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
1185 IB_QP_CREATE_CROSS_CHANNEL = 1 << 2,
1186 IB_QP_CREATE_MANAGED_SEND = 1 << 3,
1187 IB_QP_CREATE_MANAGED_RECV = 1 << 4,
1188 IB_QP_CREATE_NETIF_QP = 1 << 5,
1189 IB_QP_CREATE_INTEGRITY_EN = 1 << 6,
1190 IB_QP_CREATE_NETDEV_USE = 1 << 7,
1191 IB_QP_CREATE_SCATTER_FCS =
1192 IB_UVERBS_QP_CREATE_SCATTER_FCS,
1193 IB_QP_CREATE_CVLAN_STRIPPING =
1194 IB_UVERBS_QP_CREATE_CVLAN_STRIPPING,
1195 IB_QP_CREATE_SOURCE_QPN = 1 << 10,
1196 IB_QP_CREATE_PCI_WRITE_END_PADDING =
1197 IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING,
1198 /* reserve bits 26-31 for low level drivers' internal use */
1199 IB_QP_CREATE_RESERVED_START = 1 << 26,
1200 IB_QP_CREATE_RESERVED_END = 1 << 31,
1201};
1202
1203/*
1204 * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
1205 * callback to destroy the passed in QP.
1206 */
1207
1208struct ib_qp_init_attr {
1209 /* This callback occurs in workqueue context */
1210 void (*event_handler)(struct ib_event *, void *);
1211
1212 void *qp_context;
1213 struct ib_cq *send_cq;
1214 struct ib_cq *recv_cq;
1215 struct ib_srq *srq;
1216 struct ib_xrcd *xrcd; /* XRC TGT QPs only */
1217 struct ib_qp_cap cap;
1218 enum ib_sig_type sq_sig_type;
1219 enum ib_qp_type qp_type;
1220 u32 create_flags;
1221
1222 /*
1223 * Only needed for special QP types, or when using the RW API.
1224 */
1225 u32 port_num;
1226 struct ib_rwq_ind_table *rwq_ind_tbl;
1227 u32 source_qpn;
1228};
1229
1230struct ib_qp_open_attr {
1231 void (*event_handler)(struct ib_event *, void *);
1232 void *qp_context;
1233 u32 qp_num;
1234 enum ib_qp_type qp_type;
1235};
1236
1237enum ib_rnr_timeout {
1238 IB_RNR_TIMER_655_36 = 0,
1239 IB_RNR_TIMER_000_01 = 1,
1240 IB_RNR_TIMER_000_02 = 2,
1241 IB_RNR_TIMER_000_03 = 3,
1242 IB_RNR_TIMER_000_04 = 4,
1243 IB_RNR_TIMER_000_06 = 5,
1244 IB_RNR_TIMER_000_08 = 6,
1245 IB_RNR_TIMER_000_12 = 7,
1246 IB_RNR_TIMER_000_16 = 8,
1247 IB_RNR_TIMER_000_24 = 9,
1248 IB_RNR_TIMER_000_32 = 10,
1249 IB_RNR_TIMER_000_48 = 11,
1250 IB_RNR_TIMER_000_64 = 12,
1251 IB_RNR_TIMER_000_96 = 13,
1252 IB_RNR_TIMER_001_28 = 14,
1253 IB_RNR_TIMER_001_92 = 15,
1254 IB_RNR_TIMER_002_56 = 16,
1255 IB_RNR_TIMER_003_84 = 17,
1256 IB_RNR_TIMER_005_12 = 18,
1257 IB_RNR_TIMER_007_68 = 19,
1258 IB_RNR_TIMER_010_24 = 20,
1259 IB_RNR_TIMER_015_36 = 21,
1260 IB_RNR_TIMER_020_48 = 22,
1261 IB_RNR_TIMER_030_72 = 23,
1262 IB_RNR_TIMER_040_96 = 24,
1263 IB_RNR_TIMER_061_44 = 25,
1264 IB_RNR_TIMER_081_92 = 26,
1265 IB_RNR_TIMER_122_88 = 27,
1266 IB_RNR_TIMER_163_84 = 28,
1267 IB_RNR_TIMER_245_76 = 29,
1268 IB_RNR_TIMER_327_68 = 30,
1269 IB_RNR_TIMER_491_52 = 31
1270};
1271
1272enum ib_qp_attr_mask {
1273 IB_QP_STATE = 1,
1274 IB_QP_CUR_STATE = (1<<1),
1275 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
1276 IB_QP_ACCESS_FLAGS = (1<<3),
1277 IB_QP_PKEY_INDEX = (1<<4),
1278 IB_QP_PORT = (1<<5),
1279 IB_QP_QKEY = (1<<6),
1280 IB_QP_AV = (1<<7),
1281 IB_QP_PATH_MTU = (1<<8),
1282 IB_QP_TIMEOUT = (1<<9),
1283 IB_QP_RETRY_CNT = (1<<10),
1284 IB_QP_RNR_RETRY = (1<<11),
1285 IB_QP_RQ_PSN = (1<<12),
1286 IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
1287 IB_QP_ALT_PATH = (1<<14),
1288 IB_QP_MIN_RNR_TIMER = (1<<15),
1289 IB_QP_SQ_PSN = (1<<16),
1290 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
1291 IB_QP_PATH_MIG_STATE = (1<<18),
1292 IB_QP_CAP = (1<<19),
1293 IB_QP_DEST_QPN = (1<<20),
1294 IB_QP_RESERVED1 = (1<<21),
1295 IB_QP_RESERVED2 = (1<<22),
1296 IB_QP_RESERVED3 = (1<<23),
1297 IB_QP_RESERVED4 = (1<<24),
1298 IB_QP_RATE_LIMIT = (1<<25),
1299
1300 IB_QP_ATTR_STANDARD_BITS = GENMASK(20, 0),
1301};
1302
1303enum ib_qp_state {
1304 IB_QPS_RESET,
1305 IB_QPS_INIT,
1306 IB_QPS_RTR,
1307 IB_QPS_RTS,
1308 IB_QPS_SQD,
1309 IB_QPS_SQE,
1310 IB_QPS_ERR
1311};
1312
1313enum ib_mig_state {
1314 IB_MIG_MIGRATED,
1315 IB_MIG_REARM,
1316 IB_MIG_ARMED
1317};
1318
1319enum ib_mw_type {
1320 IB_MW_TYPE_1 = 1,
1321 IB_MW_TYPE_2 = 2
1322};
1323
1324struct ib_qp_attr {
1325 enum ib_qp_state qp_state;
1326 enum ib_qp_state cur_qp_state;
1327 enum ib_mtu path_mtu;
1328 enum ib_mig_state path_mig_state;
1329 u32 qkey;
1330 u32 rq_psn;
1331 u32 sq_psn;
1332 u32 dest_qp_num;
1333 int qp_access_flags;
1334 struct ib_qp_cap cap;
1335 struct rdma_ah_attr ah_attr;
1336 struct rdma_ah_attr alt_ah_attr;
1337 u16 pkey_index;
1338 u16 alt_pkey_index;
1339 u8 en_sqd_async_notify;
1340 u8 sq_draining;
1341 u8 max_rd_atomic;
1342 u8 max_dest_rd_atomic;
1343 u8 min_rnr_timer;
1344 u32 port_num;
1345 u8 timeout;
1346 u8 retry_cnt;
1347 u8 rnr_retry;
1348 u32 alt_port_num;
1349 u8 alt_timeout;
1350 u32 rate_limit;
1351 struct net_device *xmit_slave;
1352};
1353
1354enum ib_wr_opcode {
1355 /* These are shared with userspace */
1356 IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
1357 IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
1358 IB_WR_SEND = IB_UVERBS_WR_SEND,
1359 IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
1360 IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
1361 IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
1362 IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
1363 IB_WR_BIND_MW = IB_UVERBS_WR_BIND_MW,
1364 IB_WR_LSO = IB_UVERBS_WR_TSO,
1365 IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
1366 IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
1367 IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
1368 IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
1369 IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
1370 IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
1371 IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,
1372 IB_WR_FLUSH = IB_UVERBS_WR_FLUSH,
1373 IB_WR_ATOMIC_WRITE = IB_UVERBS_WR_ATOMIC_WRITE,
1374
1375 /* These are kernel only and can not be issued by userspace */
1376 IB_WR_REG_MR = 0x20,
1377 IB_WR_REG_MR_INTEGRITY,
1378
1379 /* reserve values for low level drivers' internal use.
1380 * These values will not be used at all in the ib core layer.
1381 */
1382 IB_WR_RESERVED1 = 0xf0,
1383 IB_WR_RESERVED2,
1384 IB_WR_RESERVED3,
1385 IB_WR_RESERVED4,
1386 IB_WR_RESERVED5,
1387 IB_WR_RESERVED6,
1388 IB_WR_RESERVED7,
1389 IB_WR_RESERVED8,
1390 IB_WR_RESERVED9,
1391 IB_WR_RESERVED10,
1392};
1393
1394enum ib_send_flags {
1395 IB_SEND_FENCE = 1,
1396 IB_SEND_SIGNALED = (1<<1),
1397 IB_SEND_SOLICITED = (1<<2),
1398 IB_SEND_INLINE = (1<<3),
1399 IB_SEND_IP_CSUM = (1<<4),
1400
1401 /* reserve bits 26-31 for low level drivers' internal use */
1402 IB_SEND_RESERVED_START = (1 << 26),
1403 IB_SEND_RESERVED_END = (1 << 31),
1404};
1405
1406struct ib_sge {
1407 u64 addr;
1408 u32 length;
1409 u32 lkey;
1410};
1411
1412struct ib_cqe {
1413 void (*done)(struct ib_cq *cq, struct ib_wc *wc);
1414};
1415
1416struct ib_send_wr {
1417 struct ib_send_wr *next;
1418 union {
1419 u64 wr_id;
1420 struct ib_cqe *wr_cqe;
1421 };
1422 struct ib_sge *sg_list;
1423 int num_sge;
1424 enum ib_wr_opcode opcode;
1425 int send_flags;
1426 union {
1427 __be32 imm_data;
1428 u32 invalidate_rkey;
1429 } ex;
1430};
1431
1432struct ib_rdma_wr {
1433 struct ib_send_wr wr;
1434 u64 remote_addr;
1435 u32 rkey;
1436};
1437
1438static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
1439{
1440 return container_of(wr, struct ib_rdma_wr, wr);
1441}
1442
1443struct ib_atomic_wr {
1444 struct ib_send_wr wr;
1445 u64 remote_addr;
1446 u64 compare_add;
1447 u64 swap;
1448 u64 compare_add_mask;
1449 u64 swap_mask;
1450 u32 rkey;
1451};
1452
1453static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
1454{
1455 return container_of(wr, struct ib_atomic_wr, wr);
1456}
1457
1458struct ib_ud_wr {
1459 struct ib_send_wr wr;
1460 struct ib_ah *ah;
1461 void *header;
1462 int hlen;
1463 int mss;
1464 u32 remote_qpn;
1465 u32 remote_qkey;
1466 u16 pkey_index; /* valid for GSI only */
1467 u32 port_num; /* valid for DR SMPs on switch only */
1468};
1469
1470static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
1471{
1472 return container_of(wr, struct ib_ud_wr, wr);
1473}
1474
1475struct ib_reg_wr {
1476 struct ib_send_wr wr;
1477 struct ib_mr *mr;
1478 u32 key;
1479 int access;
1480};
1481
1482static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
1483{
1484 return container_of(wr, struct ib_reg_wr, wr);
1485}
1486
1487struct ib_recv_wr {
1488 struct ib_recv_wr *next;
1489 union {
1490 u64 wr_id;
1491 struct ib_cqe *wr_cqe;
1492 };
1493 struct ib_sge *sg_list;
1494 int num_sge;
1495};
1496
1497enum ib_access_flags {
1498 IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
1499 IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
1500 IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
1501 IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
1502 IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
1503 IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
1504 IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
1505 IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
1506 IB_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_RELAXED_ORDERING,
1507 IB_ACCESS_FLUSH_GLOBAL = IB_UVERBS_ACCESS_FLUSH_GLOBAL,
1508 IB_ACCESS_FLUSH_PERSISTENT = IB_UVERBS_ACCESS_FLUSH_PERSISTENT,
1509
1510 IB_ACCESS_OPTIONAL = IB_UVERBS_ACCESS_OPTIONAL_RANGE,
1511 IB_ACCESS_SUPPORTED =
1512 ((IB_ACCESS_FLUSH_PERSISTENT << 1) - 1) | IB_ACCESS_OPTIONAL,
1513};
1514
1515/*
1516 * XXX: these are apparently used for ->rereg_user_mr, no idea why they
1517 * are hidden here instead of a uapi header!
1518 */
1519enum ib_mr_rereg_flags {
1520 IB_MR_REREG_TRANS = 1,
1521 IB_MR_REREG_PD = (1<<1),
1522 IB_MR_REREG_ACCESS = (1<<2),
1523 IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
1524};
1525
1526struct ib_umem;
1527
1528enum rdma_remove_reason {
1529 /*
1530 * Userspace requested uobject deletion or an initial attempt
1531 * to remove the uobject via cleanup. The call may fail.
1532 */
1533 RDMA_REMOVE_DESTROY,
1534 /* Context deletion. This call should delete the actual object itself */
1535 RDMA_REMOVE_CLOSE,
1536 /* Driver is being hot-unplugged. This call should delete the actual object itself */
1537 RDMA_REMOVE_DRIVER_REMOVE,
1538 /* uobj is being cleaned-up before being committed */
1539 RDMA_REMOVE_ABORT,
1540 /* The driver failed to destroy the uobject and is being disconnected */
1541 RDMA_REMOVE_DRIVER_FAILURE,
1542};
1543
1544struct ib_rdmacg_object {
1545#ifdef CONFIG_CGROUP_RDMA
1546 struct rdma_cgroup *cg; /* owner rdma cgroup */
1547#endif
1548};
1549
1550struct ib_ucontext {
1551 struct ib_device *device;
1552 struct ib_uverbs_file *ufile;
1553
1554 struct ib_rdmacg_object cg_obj;
1555 u64 enabled_caps;
1556 /*
1557 * Implementation details of the RDMA core, don't use in drivers:
1558 */
1559 struct rdma_restrack_entry res;
1560 struct xarray mmap_xa;
1561};
1562
1563struct ib_uobject {
1564 u64 user_handle; /* handle given to us by userspace */
1565 /* ufile & ucontext owning this object */
1566 struct ib_uverbs_file *ufile;
1567 /* FIXME, save memory: ufile->context == context */
1568 struct ib_ucontext *context; /* associated user context */
1569 void *object; /* containing object */
1570 struct list_head list; /* link to context's list */
1571 struct ib_rdmacg_object cg_obj; /* rdmacg object */
1572 int id; /* index into kernel idr */
1573 struct kref ref;
1574 atomic_t usecnt; /* protects exclusive access */
1575 struct rcu_head rcu; /* kfree_rcu() overhead */
1576
1577 const struct uverbs_api_object *uapi_object;
1578};
1579
1580/**
1581 * struct ib_udata - Driver request/response data from userspace
1582 * @inbuf: Pointer to request data from userspace
1583 * @outbuf: Pointer to response buffer in userspace
1584 * @inlen: Length of request data
1585 * @outlen: Length of response buffer
1586 *
1587 * struct ib_udata is used to hold the driver data request and response
1588 * structures defined in the uapi. They follow these rules for forwards and
1589 * backwards compatibility:
1590 *
1591 * 1) Userspace can provide a longer request so long as the trailing part the
1592 * kernel doesn't understand is all zeros.
1593 *
1594 * This provides a degree of safety if userspace wrongly tries to use a new
1595 * feature the kernel does not understand with some non-zero value.
1596 *
1597 * It allows a simpler rdma-core implementation because the library can
1598 * simply always use the latest structs for the request, even if they are
1599 * bigger. It simply has to avoid using the new members if they are not
1600 * supported/required.
1601 *
1602 * 2) Userspace can provide a shorter request; the kernel will zero-pad it out
1603 * to fill the storage. The newer kernel should understand that older
1604 * userspace will provide 0 to new fields. The kernel has three options to
1605 * enable new request fields:
1606 *
1607 * - Input comp_mask that says the field is supported
1608 * - Look for non-zero values
1609 * - Check if the udata->inlen size covers the field
1610 *
1611 * This also corrects any bugs related to not filling in request structures
1612 * as the new helper always fully writes to the struct.
1613 *
1614 * 3) Userspace can provide a shorter or longer response struct. If shorter,
1615 * the kernel reply is truncated. The kernel should be designed to not write
1616 * to new reply fields unless userspace has affirmatively requested them.
1617 *
1618 * If the user buffer is longer, the kernel will zero-fill it.
1619 *
1620 * Userspace has three options to enable new response fields:
1621 *
1622 * - Output comp_mask that says the field is supported
1623 * - Look for non-zero values
1624 * - Infer the output must be valid because the request contents demand it
1625 * and old kernels will fail the request
1626 *
1627 * The following helper functions implement these semantics:
1628 *
1629 * ib_copy_validate_udata_in() - Checks the minimum length, and zero trailing::
1630 *
1631 * struct driver_create_cq_req req;
1632 * int err;
1633 *
1634 * err = ib_copy_validate_udata_in(udata, req, end_member);
1635 * if (err)
1636 * return err;
1637 *
1638 * The third argument specifies the last member of the struct in the first
1639 * kernel version that introduced it, establishing the minimum required size.
1640 *
1641 * ib_copy_validate_udata_in_cm() - The above but also validate a
1642 * comp_mask member only has supported bits set::
1643 *
1644 * err = ib_copy_validate_udata_in_cm(udata, req, first_version_last_member,
1645 * DRIVER_CREATE_CQ_MASK_FEATURE_A |
1646 * DRIVER_CREATE_CQ_MASK_FEATURE_B);
1647 *
1648 * ib_respond_udata() - Implements the response rules::
1649 *
1650 * struct driver_create_cq_resp resp = {};
1651 *
1652 * resp.some_field = value;
1653 * return ib_respond_udata(udata, resp);
1654 *
1655 * ib_is_udata_in_empty() - Used instead of ib_copy_validate_udata_in() if the
1656 * driver does not have a request structure::
1657 *
1658 * ret = ib_is_udata_in_empty(udata);
1659 * if (ret)
1660 * return ret;
1661 *
1662 * Similarly ib_respond_empty_udata() is used instead of ib_respond_udata() if
1663 * the driver does not have a response structure::
1664 *
1665 * return ib_respond_empty_udata(udata);
1666 */
1667struct ib_udata {
1668 const void __user *inbuf;
1669 void __user *outbuf;
1670 size_t inlen;
1671 size_t outlen;
1672};
1673
1674struct ib_pd {
1675 u32 local_dma_lkey;
1676 u32 flags;
1677 struct ib_device *device;
1678 struct ib_uobject *uobject;
1679 atomic_t usecnt; /* count all resources */
1680
1681 u32 unsafe_global_rkey;
1682
1683 /*
1684 * Implementation details of the RDMA core, don't use in drivers:
1685 */
1686 struct ib_mr *__internal_mr;
1687 struct rdma_restrack_entry res;
1688};
1689
1690struct ib_xrcd {
1691 struct ib_device *device;
1692 atomic_t usecnt; /* count all exposed resources */
1693 struct inode *inode;
1694 struct rw_semaphore tgt_qps_rwsem;
1695 struct xarray tgt_qps;
1696};
1697
1698struct ib_ah {
1699 struct ib_device *device;
1700 struct ib_pd *pd;
1701 struct ib_uobject *uobject;
1702 const struct ib_gid_attr *sgid_attr;
1703 enum rdma_ah_attr_type type;
1704};
1705
1706typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
1707
1708enum ib_poll_context {
1709 IB_POLL_SOFTIRQ, /* poll from softirq context */
1710 IB_POLL_WORKQUEUE, /* poll from workqueue */
1711 IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
1712 IB_POLL_LAST_POOL_TYPE = IB_POLL_UNBOUND_WORKQUEUE,
1713
1714 IB_POLL_DIRECT, /* caller context, no hw completions */
1715};
1716
1717struct ib_cq {
1718 struct ib_device *device;
1719 struct ib_ucq_object *uobject;
1720 ib_comp_handler comp_handler;
1721 void (*event_handler)(struct ib_event *, void *);
1722 void *cq_context;
1723 int cqe;
1724 unsigned int cqe_used;
1725 atomic_t usecnt; /* count number of work queues */
1726 enum ib_poll_context poll_ctx;
1727 struct ib_wc *wc;
1728 struct list_head pool_entry;
1729 union {
1730 struct irq_poll iop;
1731 struct work_struct work;
1732 };
1733 struct workqueue_struct *comp_wq;
1734 struct dim *dim;
1735
1736 /* updated only by trace points */
1737 ktime_t timestamp;
1738 u8 interrupt:1;
1739 u8 shared:1;
1740 unsigned int comp_vector;
1741 struct ib_umem *umem;
1742
1743 /*
1744 * Implementation details of the RDMA core, don't use in drivers:
1745 */
1746 struct rdma_restrack_entry res;
1747};
1748
1749struct ib_srq {
1750 struct ib_device *device;
1751 struct ib_pd *pd;
1752 struct ib_usrq_object *uobject;
1753 void (*event_handler)(struct ib_event *, void *);
1754 void *srq_context;
1755 enum ib_srq_type srq_type;
1756 atomic_t usecnt;
1757
1758 struct {
1759 struct ib_cq *cq;
1760 union {
1761 struct {
1762 struct ib_xrcd *xrcd;
1763 u32 srq_num;
1764 } xrc;
1765 };
1766 } ext;
1767
1768 /*
1769 * Implementation details of the RDMA core, don't use in drivers:
1770 */
1771 struct rdma_restrack_entry res;
1772};
1773
1774enum ib_raw_packet_caps {
1775 /*
1776 * Stripping the cvlan from an incoming packet and reporting it in the
1777 * matching work completion is supported.
1778 */
1779 IB_RAW_PACKET_CAP_CVLAN_STRIPPING =
1780 IB_UVERBS_RAW_PACKET_CAP_CVLAN_STRIPPING,
1781 /*
1782 * Scattering the FCS field of an incoming packet to host memory is supported.
1783 */
1784 IB_RAW_PACKET_CAP_SCATTER_FCS = IB_UVERBS_RAW_PACKET_CAP_SCATTER_FCS,
1785 /* Checksum offloads are supported (for both send and receive). */
1786 IB_RAW_PACKET_CAP_IP_CSUM = IB_UVERBS_RAW_PACKET_CAP_IP_CSUM,
1787 /*
1788 * When a packet is received for an RQ with no receive WQEs, the
1789 * packet processing is delayed.
1790 */
1791 IB_RAW_PACKET_CAP_DELAY_DROP = IB_UVERBS_RAW_PACKET_CAP_DELAY_DROP,
1792};
1793
1794enum ib_wq_type {
1795 IB_WQT_RQ = IB_UVERBS_WQT_RQ,
1796};
1797
1798enum ib_wq_state {
1799 IB_WQS_RESET,
1800 IB_WQS_RDY,
1801 IB_WQS_ERR
1802};
1803
1804struct ib_wq {
1805 struct ib_device *device;
1806 struct ib_uwq_object *uobject;
1807 void *wq_context;
1808 void (*event_handler)(struct ib_event *, void *);
1809 struct ib_pd *pd;
1810 struct ib_cq *cq;
1811 u32 wq_num;
1812 enum ib_wq_state state;
1813 enum ib_wq_type wq_type;
1814 atomic_t usecnt;
1815};
1816
1817enum ib_wq_flags {
1818 IB_WQ_FLAGS_CVLAN_STRIPPING = IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING,
1819 IB_WQ_FLAGS_SCATTER_FCS = IB_UVERBS_WQ_FLAGS_SCATTER_FCS,
1820 IB_WQ_FLAGS_DELAY_DROP = IB_UVERBS_WQ_FLAGS_DELAY_DROP,
1821 IB_WQ_FLAGS_PCI_WRITE_END_PADDING =
1822 IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING,
1823};
1824
1825struct ib_wq_init_attr {
1826 void *wq_context;
1827 enum ib_wq_type wq_type;
1828 u32 max_wr;
1829 u32 max_sge;
1830 struct ib_cq *cq;
1831 void (*event_handler)(struct ib_event *, void *);
1832 u32 create_flags; /* Use enum ib_wq_flags */
1833};
1834
1835enum ib_wq_attr_mask {
1836 IB_WQ_STATE = 1 << 0,
1837 IB_WQ_CUR_STATE = 1 << 1,
1838 IB_WQ_FLAGS = 1 << 2,
1839};
1840
1841struct ib_wq_attr {
1842 enum ib_wq_state wq_state;
1843 enum ib_wq_state curr_wq_state;
1844 u32 flags; /* Use enum ib_wq_flags */
1845 u32 flags_mask; /* Use enum ib_wq_flags */
1846};
1847
1848struct ib_rwq_ind_table {
1849 struct ib_device *device;
1850 struct ib_uobject *uobject;
1851 atomic_t usecnt;
1852 u32 ind_tbl_num;
1853 u32 log_ind_tbl_size;
1854 struct ib_wq **ind_tbl;
1855};
1856
1857struct ib_rwq_ind_table_init_attr {
1858 u32 log_ind_tbl_size;
1859 /* Each entry is a pointer to Receive Work Queue */
1860 struct ib_wq **ind_tbl;
1861};
1862
1863enum port_pkey_state {
1864 IB_PORT_PKEY_NOT_VALID = 0,
1865 IB_PORT_PKEY_VALID = 1,
1866 IB_PORT_PKEY_LISTED = 2,
1867};
1868
1869struct ib_qp_security;
1870
1871struct ib_port_pkey {
1872 enum port_pkey_state state;
1873 u16 pkey_index;
1874 u32 port_num;
1875 struct list_head qp_list;
1876 struct list_head to_error_list;
1877 struct ib_qp_security *sec;
1878};
1879
1880struct ib_ports_pkeys {
1881 struct ib_port_pkey main;
1882 struct ib_port_pkey alt;
1883};
1884
1885struct ib_qp_security {
1886 struct ib_qp *qp;
1887 struct ib_device *dev;
1888 /* Hold this mutex when changing port and pkey settings. */
1889 struct mutex mutex;
1890 struct ib_ports_pkeys *ports_pkeys;
1891 /* A list of all open shared QP handles. Required to enforce security
1892 * properly for all users of a shared QP.
1893 */
1894 struct list_head shared_qp_list;
1895 void *security;
1896 bool destroying;
1897 atomic_t error_list_count;
1898 struct completion error_complete;
1899 int error_comps_pending;
1900};
1901
1902/*
1903 * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
1904 * @max_read_sge: Maximum SGE elements per RDMA READ request.
1905 */
1906struct ib_qp {
1907 struct ib_device *device;
1908 struct ib_pd *pd;
1909 struct ib_cq *send_cq;
1910 struct ib_cq *recv_cq;
1911 spinlock_t mr_lock;
1912 int mrs_used;
1913 struct list_head rdma_mrs;
1914 struct list_head sig_mrs;
1915 struct ib_srq *srq;
1916 struct completion srq_completion;
1917 struct ib_xrcd *xrcd; /* XRC TGT QPs only */
1918 struct list_head xrcd_list;
1919
1920 /* count times opened, mcast attaches, flow attaches */
1921 atomic_t usecnt;
1922 struct list_head open_list;
1923 struct ib_qp *real_qp;
1924 struct ib_uqp_object *uobject;
1925 void (*event_handler)(struct ib_event *, void *);
1926 void (*registered_event_handler)(struct ib_event *, void *);
1927 void *qp_context;
1928 /* sgid_attrs associated with the AV's */
1929 const struct ib_gid_attr *av_sgid_attr;
1930 const struct ib_gid_attr *alt_path_sgid_attr;
1931 u32 qp_num;
1932 u32 max_write_sge;
1933 u32 max_read_sge;
1934 enum ib_qp_type qp_type;
1935 struct ib_rwq_ind_table *rwq_ind_tbl;
1936 struct ib_qp_security *qp_sec;
1937 u32 port;
1938
1939 bool integrity_en;
1940 /*
1941 * Implementation details of the RDMA core, don't use in drivers:
1942 */
1943 struct rdma_restrack_entry res;
1944
1945 /* The counter the qp is bind to */
1946 struct rdma_counter *counter;
1947};
1948
1949struct ib_dm {
1950 struct ib_device *device;
1951 u32 length;
1952 u32 flags;
1953 struct ib_uobject *uobject;
1954 atomic_t usecnt;
1955};
1956
1957/* bit values to mark existence of ib_dmah fields */
1958enum {
1959 IB_DMAH_CPU_ID_EXISTS,
1960 IB_DMAH_MEM_TYPE_EXISTS,
1961 IB_DMAH_PH_EXISTS,
1962};
1963
1964struct ib_dmah {
1965 struct ib_device *device;
1966 struct ib_uobject *uobject;
1967 /*
1968 * Implementation details of the RDMA core, don't use in drivers:
1969 */
1970 struct rdma_restrack_entry res;
1971 u32 cpu_id;
1972 enum tph_mem_type mem_type;
1973 atomic_t usecnt;
1974 u8 ph;
1975 u8 valid_fields; /* use IB_DMAH_XXX_EXISTS */
1976};
1977
1978struct ib_mr {
1979 struct ib_device *device;
1980 struct ib_pd *pd;
1981 u32 lkey;
1982 u32 rkey;
1983 u64 iova;
1984 u64 length;
1985 unsigned int page_size;
1986 enum ib_mr_type type;
1987 bool need_inval;
1988 union {
1989 struct ib_uobject *uobject; /* user */
1990 struct list_head qp_entry; /* FR */
1991 };
1992
1993 struct ib_dm *dm;
1994 struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */
1995 struct ib_dmah *dmah;
1996 struct {
1997 struct ib_frmr_pool *pool;
1998 struct ib_frmr_key key;
1999 u32 handle;
2000 } frmr;
2001 /*
2002 * Implementation details of the RDMA core, don't use in drivers:
2003 */
2004 struct rdma_restrack_entry res;
2005};
2006
2007struct ib_mw {
2008 struct ib_device *device;
2009 struct ib_pd *pd;
2010 struct ib_uobject *uobject;
2011 u32 rkey;
2012 enum ib_mw_type type;
2013};
2014
2015/* Supported steering options */
2016enum ib_flow_attr_type {
2017 /* steering according to rule specifications */
2018 IB_FLOW_ATTR_NORMAL = 0x0,
2019 /* default unicast and multicast rule -
2020 * receive all Eth traffic which isn't steered to any QP
2021 */
2022 IB_FLOW_ATTR_ALL_DEFAULT = 0x1,
2023 /* default multicast rule -
2024 * receive all Eth multicast traffic which isn't steered to any QP
2025 */
2026 IB_FLOW_ATTR_MC_DEFAULT = 0x2,
2027 /* sniffer rule - receive all port traffic */
2028 IB_FLOW_ATTR_SNIFFER = 0x3
2029};
2030
2031/* Supported steering header types */
2032enum ib_flow_spec_type {
2033 /* L2 headers*/
2034 IB_FLOW_SPEC_ETH = 0x20,
2035 IB_FLOW_SPEC_IB = 0x22,
2036 /* L3 header*/
2037 IB_FLOW_SPEC_IPV4 = 0x30,
2038 IB_FLOW_SPEC_IPV6 = 0x31,
2039 IB_FLOW_SPEC_ESP = 0x34,
2040 /* L4 headers*/
2041 IB_FLOW_SPEC_TCP = 0x40,
2042 IB_FLOW_SPEC_UDP = 0x41,
2043 IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50,
2044 IB_FLOW_SPEC_GRE = 0x51,
2045 IB_FLOW_SPEC_MPLS = 0x60,
2046 IB_FLOW_SPEC_INNER = 0x100,
2047 /* Actions */
2048 IB_FLOW_SPEC_ACTION_TAG = 0x1000,
2049 IB_FLOW_SPEC_ACTION_DROP = 0x1001,
2050 IB_FLOW_SPEC_ACTION_HANDLE = 0x1002,
2051 IB_FLOW_SPEC_ACTION_COUNT = 0x1003,
2052};
2053#define IB_FLOW_SPEC_LAYER_MASK 0xF0
2054#define IB_FLOW_SPEC_SUPPORT_LAYERS 10
2055
2056enum ib_flow_flags {
2057 IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
2058 IB_FLOW_ATTR_FLAGS_EGRESS = 1UL << 2, /* Egress flow */
2059 IB_FLOW_ATTR_FLAGS_RESERVED = 1UL << 3 /* Must be last */
2060};
2061
2062struct ib_flow_eth_filter {
2063 u8 dst_mac[6];
2064 u8 src_mac[6];
2065 __be16 ether_type;
2066 __be16 vlan_tag;
2067};
2068
2069struct ib_flow_spec_eth {
2070 u32 type;
2071 u16 size;
2072 struct ib_flow_eth_filter val;
2073 struct ib_flow_eth_filter mask;
2074};
2075
2076struct ib_flow_ib_filter {
2077 __be16 dlid;
2078 __u8 sl;
2079};
2080
2081struct ib_flow_spec_ib {
2082 u32 type;
2083 u16 size;
2084 struct ib_flow_ib_filter val;
2085 struct ib_flow_ib_filter mask;
2086};
2087
2088/* IPv4 header flags */
2089enum ib_ipv4_flags {
2090 IB_IPV4_DONT_FRAG = 0x2, /* Don't enable packet fragmentation */
2091 IB_IPV4_MORE_FRAG = 0x4 /* All fragmented packets except the
2092 last one have this flag set */
2093};
2094
2095struct ib_flow_ipv4_filter {
2096 __be32 src_ip;
2097 __be32 dst_ip;
2098 u8 proto;
2099 u8 tos;
2100 u8 ttl;
2101 u8 flags;
2102};
2103
2104struct ib_flow_spec_ipv4 {
2105 u32 type;
2106 u16 size;
2107 struct ib_flow_ipv4_filter val;
2108 struct ib_flow_ipv4_filter mask;
2109};
2110
2111struct ib_flow_ipv6_filter {
2112 u8 src_ip[16];
2113 u8 dst_ip[16];
2114 __be32 flow_label;
2115 u8 next_hdr;
2116 u8 traffic_class;
2117 u8 hop_limit;
2118} __packed;
2119
2120struct ib_flow_spec_ipv6 {
2121 u32 type;
2122 u16 size;
2123 struct ib_flow_ipv6_filter val;
2124 struct ib_flow_ipv6_filter mask;
2125};
2126
2127struct ib_flow_tcp_udp_filter {
2128 __be16 dst_port;
2129 __be16 src_port;
2130};
2131
2132struct ib_flow_spec_tcp_udp {
2133 u32 type;
2134 u16 size;
2135 struct ib_flow_tcp_udp_filter val;
2136 struct ib_flow_tcp_udp_filter mask;
2137};
2138
2139struct ib_flow_tunnel_filter {
2140 __be32 tunnel_id;
2141};
2142
2143/* ib_flow_spec_tunnel describes the VXLAN tunnel;
2144 * the tunnel_id in val holds the VNI value.
2145 */
2146struct ib_flow_spec_tunnel {
2147 u32 type;
2148 u16 size;
2149 struct ib_flow_tunnel_filter val;
2150 struct ib_flow_tunnel_filter mask;
2151};
2152
2153struct ib_flow_esp_filter {
2154 __be32 spi;
2155 __be32 seq;
2156};
2157
2158struct ib_flow_spec_esp {
2159 u32 type;
2160 u16 size;
2161 struct ib_flow_esp_filter val;
2162 struct ib_flow_esp_filter mask;
2163};
2164
2165struct ib_flow_gre_filter {
2166 __be16 c_ks_res0_ver;
2167 __be16 protocol;
2168 __be32 key;
2169};
2170
2171struct ib_flow_spec_gre {
2172 u32 type;
2173 u16 size;
2174 struct ib_flow_gre_filter val;
2175 struct ib_flow_gre_filter mask;
2176};
2177
2178struct ib_flow_mpls_filter {
2179 __be32 tag;
2180};
2181
2182struct ib_flow_spec_mpls {
2183 u32 type;
2184 u16 size;
2185 struct ib_flow_mpls_filter val;
2186 struct ib_flow_mpls_filter mask;
2187};
2188
2189struct ib_flow_spec_action_tag {
2190 enum ib_flow_spec_type type;
2191 u16 size;
2192 u32 tag_id;
2193};
2194
2195struct ib_flow_spec_action_drop {
2196 enum ib_flow_spec_type type;
2197 u16 size;
2198};
2199
2200struct ib_flow_spec_action_handle {
2201 enum ib_flow_spec_type type;
2202 u16 size;
2203 struct ib_flow_action *act;
2204};
2205
2206enum ib_counters_description {
2207 IB_COUNTER_PACKETS,
2208 IB_COUNTER_BYTES,
2209};
2210
2211struct ib_flow_spec_action_count {
2212 enum ib_flow_spec_type type;
2213 u16 size;
2214 struct ib_counters *counters;
2215};
2216
2217union ib_flow_spec {
2218 struct {
2219 u32 type;
2220 u16 size;
2221 };
2222 struct ib_flow_spec_eth eth;
2223 struct ib_flow_spec_ib ib;
2224 struct ib_flow_spec_ipv4 ipv4;
2225 struct ib_flow_spec_tcp_udp tcp_udp;
2226 struct ib_flow_spec_ipv6 ipv6;
2227 struct ib_flow_spec_tunnel tunnel;
2228 struct ib_flow_spec_esp esp;
2229 struct ib_flow_spec_gre gre;
2230 struct ib_flow_spec_mpls mpls;
2231 struct ib_flow_spec_action_tag flow_tag;
2232 struct ib_flow_spec_action_drop drop;
2233 struct ib_flow_spec_action_handle action;
2234 struct ib_flow_spec_action_count flow_count;
2235};
2236
2237struct ib_flow_attr {
2238 enum ib_flow_attr_type type;
2239 u16 size;
2240 u16 priority;
2241 u32 flags;
2242 u8 num_of_specs;
2243 u32 port;
2244 union ib_flow_spec flows[];
2245};
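/*
 * Illustrative sketch: building a single-spec flow attribute that steers
 * traffic from a given source MAC towards a QP via the create_flow device
 * op. The variables (smac, the surrounding context) are hypothetical and
 * error handling is omitted; ether_addr_copy()/eth_broadcast_addr() come
 * from <linux/etherdevice.h>.
 *
 *	struct ib_flow_attr *attr;
 *	struct ib_flow_spec_eth *eth;
 *
 *	attr = kzalloc(sizeof(*attr) + sizeof(union ib_flow_spec), GFP_KERNEL);
 *	eth = &attr->flows[0].eth;
 *
 *	attr->type = IB_FLOW_ATTR_NORMAL;
 *	attr->size = sizeof(*attr) + sizeof(*eth);	// attr plus all specs
 *	attr->num_of_specs = 1;
 *	attr->port = 1;
 *
 *	eth->type = IB_FLOW_SPEC_ETH;
 *	eth->size = sizeof(*eth);
 *	ether_addr_copy(eth->val.src_mac, smac);
 *	eth_broadcast_addr(eth->mask.src_mac);	// match every bit of src_mac
 */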
2246
2247struct ib_flow {
2248 struct ib_qp *qp;
2249 struct ib_device *device;
2250 struct ib_uobject *uobject;
2251};
2252
2253enum ib_flow_action_type {
2254 IB_FLOW_ACTION_UNSPECIFIED,
2255 IB_FLOW_ACTION_ESP = 1,
2256};
2257
2258struct ib_flow_action_attrs_esp_keymats {
2259 enum ib_uverbs_flow_action_esp_keymat protocol;
2260 union {
2261 struct ib_uverbs_flow_action_esp_keymat_aes_gcm aes_gcm;
2262 } keymat;
2263};
2264
2265struct ib_flow_action_attrs_esp_replays {
2266 enum ib_uverbs_flow_action_esp_replay protocol;
2267 union {
2268 struct ib_uverbs_flow_action_esp_replay_bmp bmp;
2269 } replay;
2270};
2271
2272enum ib_flow_action_attrs_esp_flags {
2273 /* All user-space flags at the top: Use enum ib_uverbs_flow_action_esp_flags
2274 * This is done in order to share the same flags between user-space and
2275 * kernel and avoid an unnecessary translation.
2276 */
2277
2278 /* Kernel flags */
2279 IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED = 1ULL << 32,
2280 IB_FLOW_ACTION_ESP_FLAGS_MOD_ESP_ATTRS = 1ULL << 33,
2281};
2282
2283struct ib_flow_spec_list {
2284 struct ib_flow_spec_list *next;
2285 union ib_flow_spec spec;
2286};
2287
2288struct ib_flow_action_attrs_esp {
2289 struct ib_flow_action_attrs_esp_keymats *keymat;
2290 struct ib_flow_action_attrs_esp_replays *replay;
2291 struct ib_flow_spec_list *encap;
2292 /* Used only if IB_FLOW_ACTION_ESP_FLAGS_ESN_TRIGGERED is enabled.
2293 * Value of 0 is a valid value.
2294 */
2295 u32 esn;
2296 u32 spi;
2297 u32 seq;
2298 u32 tfc_pad;
2299 /* Use enum ib_flow_action_attrs_esp_flags */
2300 u64 flags;
2301 u64 hard_limit_pkts;
2302};
2303
2304struct ib_flow_action {
2305 struct ib_device *device;
2306 struct ib_uobject *uobject;
2307 enum ib_flow_action_type type;
2308 atomic_t usecnt;
2309};
2310
2311struct ib_mad;
2312
2313enum ib_process_mad_flags {
2314 IB_MAD_IGNORE_MKEY = 1,
2315 IB_MAD_IGNORE_BKEY = 2,
2316 IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
2317};
2318
2319enum ib_mad_result {
2320 IB_MAD_RESULT_FAILURE = 0, /* (!SUCCESS is the important flag) */
2321 IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */
2322 IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent */
2323 IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */
2324};
2325
2326struct ib_port_cache {
2327 u64 subnet_prefix;
2328 struct ib_pkey_cache *pkey;
2329 struct ib_gid_table *gid;
2330 u8 lmc;
2331 enum ib_port_state port_state;
2332 enum ib_port_state last_port_state;
2333};
2334
2335struct ib_port_immutable {
2336 int pkey_tbl_len;
2337 int gid_tbl_len;
2338 u32 core_cap_flags;
2339 u32 max_mad_size;
2340};
2341
2342struct ib_port_data {
2343 struct ib_device *ib_dev;
2344
2345 struct ib_port_immutable immutable;
2346
2347 spinlock_t pkey_list_lock;
2348
2349 spinlock_t netdev_lock;
2350
2351 struct list_head pkey_list;
2352
2353 struct ib_port_cache cache;
2354
2355 struct net_device __rcu *netdev;
2356 netdevice_tracker netdev_tracker;
2357 struct hlist_node ndev_hash_link;
2358 struct rdma_port_counter port_counter;
2359 struct ib_port *sysfs;
2360};
2361
2362/* rdma netdev type - specifies protocol type */
2363enum rdma_netdev_t {
2364 RDMA_NETDEV_IPOIB,
2365};
2366
2367/**
2368 * struct rdma_netdev - rdma netdev
2369 * For cases where netstack interfacing is required.
2370 */
2371struct rdma_netdev {
2372 void *clnt_priv;
2373 struct ib_device *hca;
2374 u32 port_num;
2375 int mtu;
2376
2377 void (*free_rdma_netdev)(struct net_device *netdev);
2378
2379 /* control functions */
2380 void (*set_id)(struct net_device *netdev, int id);
2381 /* send packet */
2382 int (*send)(struct net_device *dev, struct sk_buff *skb,
2383 struct ib_ah *address, u32 dqpn);
2384 /* multicast */
2385 int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
2386 union ib_gid *gid, u16 mlid,
2387 int set_qkey, u32 qkey);
2388 int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
2389 union ib_gid *gid, u16 mlid);
2390 /* timeout */
2391 void (*tx_timeout)(struct net_device *dev, unsigned int txqueue);
2392};
2393
2394struct rdma_netdev_alloc_params {
2395 size_t sizeof_priv;
2396 unsigned int txqs;
2397 unsigned int rxqs;
2398 void *param;
2399
2400 int (*initialize_rdma_netdev)(struct ib_device *device, u32 port_num,
2401 struct net_device *netdev, void *param);
2402};
2403
2404struct ib_odp_counters {
2405 atomic64_t faults;
2406 atomic64_t faults_handled;
2407 atomic64_t invalidations;
2408 atomic64_t invalidations_handled;
2409 atomic64_t prefetch;
2410};
2411
2412struct ib_counters {
2413 struct ib_device *device;
2414 struct ib_uobject *uobject;
2415 /* num of objects attached */
2416 atomic_t usecnt;
2417};
2418
2419struct ib_counters_read_attr {
2420 u64 *counters_buff;
2421 u32 ncounters;
2422 u32 flags; /* use enum ib_read_counters_flags */
2423};
2424
2425struct uverbs_attr_bundle;
2426struct iw_cm_id;
2427struct iw_cm_conn_param;
2428
2429#define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member) \
2430 .size_##ib_struct = \
2431 (sizeof(struct drv_struct) + \
2432 BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) + \
2433 BUILD_BUG_ON_ZERO( \
2434 !__same_type(((struct drv_struct *)NULL)->member, \
2435 struct ib_struct)))
2436
2437#define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp) \
2438 ((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \
2439 gfp, false))
2440
2441#define rdma_zalloc_drv_obj_numa(ib_dev, ib_type) \
2442 ((struct ib_type *)rdma_zalloc_obj(ib_dev, ib_dev->ops.size_##ib_type, \
2443 GFP_KERNEL, true))
2444
2445#define rdma_zalloc_drv_obj(ib_dev, ib_type) \
2446 rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)
2447
2448#define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
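/*
 * Illustrative sketch of how the macros above fit together: a driver embeds
 * the core object as the first member of its own structure, advertises the
 * combined size through INIT_RDMA_OBJ_SIZE() in its ib_device_ops, and the
 * core then allocates the whole object with rdma_zalloc_drv_obj(). The
 * names my_pd/ibpd/my_dev_ops are hypothetical.
 *
 *	struct my_pd {
 *		struct ib_pd ibpd;	// member named in INIT_RDMA_OBJ_SIZE()
 *		u32 pdn;
 *	};
 *
 *	static const struct ib_device_ops my_dev_ops = {
 *		...
 *		INIT_RDMA_OBJ_SIZE(ib_pd, my_pd, ibpd),
 *	};
 *
 *	// In the core: allocates sizeof(struct my_pd), zero-initialized
 *	struct ib_pd *pd = rdma_zalloc_drv_obj(ib_dev, ib_pd);
 */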
2449
2450struct rdma_user_mmap_entry {
2451 struct kref ref;
2452 struct ib_ucontext *ucontext;
2453 unsigned long start_pgoff;
2454 size_t npages;
2455 bool driver_removed;
2456 /* protects access to dmabufs */
2457 struct mutex dmabufs_lock;
2458 struct list_head dmabufs;
2459};
2460
2461/* Return the offset (in bytes) the user should pass to libc's mmap() */
2462static inline u64
2463rdma_user_mmap_get_offset(const struct rdma_user_mmap_entry *entry)
2464{
2465 return (u64)entry->start_pgoff << PAGE_SHIFT;
2466}
2467
2468/**
2469 * struct ib_device_ops - InfiniBand device operations
2470 * This structure defines all the InfiniBand device operations. Providers must
2471 * define the operations they support; any left undefined remain NULL.
2472 */
2473struct ib_device_ops {
2474 struct module *owner;
2475 enum rdma_driver_id driver_id;
2476 u32 uverbs_abi_ver;
2477 unsigned int uverbs_no_driver_id_binding:1;
2478 /*
2479 * Indicates the driver checks every op accepting a udata for the
2480 * correct size on input and always handles the output using the udata
2481 * helpers.
2482 */
2483 unsigned int uverbs_robust_udata:1;
2484
2485 /*
2486 * NOTE: New drivers should not make use of device_group; instead new
2487 * device parameters should be exposed via netlink commands. This
2488 * mechanism exists only for existing drivers.
2489 */
2490 const struct attribute_group *device_group;
2491 const struct attribute_group **port_groups;
2492
2493 int (*post_send)(struct ib_qp *qp, const struct ib_send_wr *send_wr,
2494 const struct ib_send_wr **bad_send_wr);
2495 int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
2496 const struct ib_recv_wr **bad_recv_wr);
2497 void (*drain_rq)(struct ib_qp *qp);
2498 void (*drain_sq)(struct ib_qp *qp);
2499 int (*poll_cq)(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
2500 int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
2501 int (*req_notify_cq)(struct ib_cq *cq, enum ib_cq_notify_flags flags);
2502 int (*post_srq_recv)(struct ib_srq *srq,
2503 const struct ib_recv_wr *recv_wr,
2504 const struct ib_recv_wr **bad_recv_wr);
2505 int (*process_mad)(struct ib_device *device, int process_mad_flags,
2506 u32 port_num, const struct ib_wc *in_wc,
2507 const struct ib_grh *in_grh,
2508 const struct ib_mad *in_mad, struct ib_mad *out_mad,
2509 size_t *out_mad_size, u16 *out_mad_pkey_index);
2510 int (*query_device)(struct ib_device *device,
2511 struct ib_device_attr *device_attr,
2512 struct ib_udata *udata);
2513 int (*modify_device)(struct ib_device *device, int device_modify_mask,
2514 struct ib_device_modify *device_modify);
2515 void (*get_dev_fw_str)(struct ib_device *device, char *str);
2516 int (*query_port)(struct ib_device *device, u32 port_num,
2517 struct ib_port_attr *port_attr);
2518 int (*query_port_speed)(struct ib_device *device, u32 port_num,
2519 u64 *speed);
2520 int (*modify_port)(struct ib_device *device, u32 port_num,
2521 int port_modify_mask,
2522 struct ib_port_modify *port_modify);
2523 /*
2524 * The following mandatory functions are used only at device
2525 * registration. Keep functions such as these at the end of this
2526 * structure to avoid cache line misses when accessing struct ib_device
2527 * in fast paths.
2528 */
2529 int (*get_port_immutable)(struct ib_device *device, u32 port_num,
2530 struct ib_port_immutable *immutable);
2531 enum rdma_link_layer (*get_link_layer)(struct ib_device *device,
2532 u32 port_num);
2533 /*
2534 * When calling get_netdev, the HW vendor's driver should return the
2535 * net device of device @device at port @port_num or NULL if such
2536 * a net device doesn't exist. The vendor driver should call dev_hold
2537 * on this net device. The HW vendor's device driver must guarantee
2538 * that this function returns NULL before the net device has finished
2539 * NETDEV_UNREGISTER state.
2540 */
2541 struct net_device *(*get_netdev)(struct ib_device *device,
2542 u32 port_num);
2543 /*
2544 * rdma netdev operation
2545 *
2546 * Driver implementing alloc_rdma_netdev or rdma_netdev_get_params
2547 * must return -EOPNOTSUPP if it doesn't support the specified type.
2548 */
2549 struct net_device *(*alloc_rdma_netdev)(
2550 struct ib_device *device, u32 port_num, enum rdma_netdev_t type,
2551 const char *name, unsigned char name_assign_type,
2552 void (*setup)(struct net_device *));
2553
2554 int (*rdma_netdev_get_params)(struct ib_device *device, u32 port_num,
2555 enum rdma_netdev_t type,
2556 struct rdma_netdev_alloc_params *params);
2557 /*
2558 * query_gid should return the GID value for @device when the @port_num
2559 * link layer is either IB or iWARP. It is a no-op if the @port_num port
2560 * uses the RoCE link layer.
2561 */
2562 int (*query_gid)(struct ib_device *device, u32 port_num, int index,
2563 union ib_gid *gid);
2564 /*
2565 * When calling add_gid, the HW vendor's driver should add the gid
2566 * of device of port at gid index available at @attr. Meta-info of
2567 * that gid (for example, the network device related to this gid) is
2568 * available at @attr. @context allows the HW vendor driver to store
2569 * extra information together with a GID entry. The HW vendor driver may
2570 * allocate memory to contain this information and store it in @context
2571 * when a new GID entry is written to. Params are consistent until the
2572 * next call of add_gid or delete_gid. The function should return 0 on
2573 * success or error otherwise. The function could be called
2574 * concurrently for different ports. This function is only called when
2575 * roce_gid_table is used.
2576 */
2577 int (*add_gid)(const struct ib_gid_attr *attr, void **context);
2578 /*
2579 * When calling del_gid, the HW vendor's driver should delete the
2580 * gid of device @device at gid index gid_index of port port_num
2581 * available in @attr.
2582 * Upon the deletion of a GID entry, the HW vendor must free any
2583 * allocated memory. The caller will clear @context afterwards.
2584 * This function is only called when roce_gid_table is used.
2585 */
2586 int (*del_gid)(const struct ib_gid_attr *attr, void **context);
2587 int (*query_pkey)(struct ib_device *device, u32 port_num, u16 index,
2588 u16 *pkey);
2589 int (*alloc_ucontext)(struct ib_ucontext *context,
2590 struct ib_udata *udata);
2591 void (*dealloc_ucontext)(struct ib_ucontext *context);
2592 int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
2593 /*
2594 * This will be called once the refcount of an entry in mmap_xa reaches
2595 * zero. The type of the memory that was mapped may differ between
2596 * entries and is opaque to the rdma_user_mmap interface.
2597 * Therefore it needs to be implemented by the driver in mmap_free.
2598 */
2599 void (*mmap_free)(struct rdma_user_mmap_entry *entry);
2600 int (*mmap_get_pfns)(struct rdma_user_mmap_entry *entry,
2601 struct phys_vec *phys_vec,
2602 struct p2pdma_provider **provider);
2603 struct rdma_user_mmap_entry *(*pgoff_to_mmap_entry)(struct ib_ucontext *ucontext,
2604 off_t pg_off);
2605 void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
2606 int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2607 int (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
2608 int (*create_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
2609 struct ib_udata *udata);
2610 int (*create_user_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
2611 struct ib_udata *udata);
2612 int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2613 int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
2614 int (*destroy_ah)(struct ib_ah *ah, u32 flags);
2615 int (*create_srq)(struct ib_srq *srq,
2616 struct ib_srq_init_attr *srq_init_attr,
2617 struct ib_udata *udata);
2618 int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
2619 enum ib_srq_attr_mask srq_attr_mask,
2620 struct ib_udata *udata);
2621 int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
2622 int (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
2623 int (*create_qp)(struct ib_qp *qp, struct ib_qp_init_attr *qp_init_attr,
2624 struct ib_udata *udata);
2625 int (*modify_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2626 int qp_attr_mask, struct ib_udata *udata);
2627 int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
2628 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
2629 int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
2630 int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
2631 struct uverbs_attr_bundle *attrs);
2632 int (*create_user_cq)(struct ib_cq *cq,
2633 const struct ib_cq_init_attr *attr,
2634 struct uverbs_attr_bundle *attrs);
2635 int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
2636 int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
2637 int (*resize_user_cq)(struct ib_cq *cq, unsigned int cqe,
2638 struct ib_udata *udata);
2639 /*
2640 * pre_destroy_cq - Prevent a cq from generating any new work
2641 * completions, but do not free any kernel resources.
2642 */
2643 int (*pre_destroy_cq)(struct ib_cq *cq);
2644 /*
2645 * post_destroy_cq - Free all kernel resources
2646 */
2647 void (*post_destroy_cq)(struct ib_cq *cq);
2648 struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
2649 struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
2650 u64 virt_addr, int mr_access_flags,
2651 struct ib_dmah *dmah,
2652 struct ib_udata *udata);
2653 struct ib_mr *(*reg_user_mr_dmabuf)(struct ib_pd *pd, u64 offset,
2654 u64 length, u64 virt_addr, int fd,
2655 int mr_access_flags,
2656 struct ib_dmah *dmah,
2657 struct uverbs_attr_bundle *attrs);
2658 struct ib_mr *(*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start,
2659 u64 length, u64 virt_addr,
2660 int mr_access_flags, struct ib_pd *pd,
2661 struct ib_udata *udata);
2662 int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
2663 struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
2664 u32 max_num_sg);
2665 struct ib_mr *(*alloc_mr_integrity)(struct ib_pd *pd,
2666 u32 max_num_data_sg,
2667 u32 max_num_meta_sg);
2668 int (*advise_mr)(struct ib_pd *pd,
2669 enum ib_uverbs_advise_mr_advice advice, u32 flags,
2670 struct ib_sge *sg_list, u32 num_sge,
2671 struct uverbs_attr_bundle *attrs);
2672
2673 /*
2674 * Kernel users should universally support relaxed ordering (RO), as
2675 * they are designed to read data only after observing the CQE and use
2676 * the DMA API correctly.
2677 *
2678 * Some drivers implicitly enable RO if platform supports it.
2679 */
2680 int (*map_mr_sg)(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
2681 unsigned int *sg_offset);
2682 int (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
2683 struct ib_mr_status *mr_status);
2684 int (*alloc_mw)(struct ib_mw *mw, struct ib_udata *udata);
2685 int (*dealloc_mw)(struct ib_mw *mw);
2686 int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2687 int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
2688 int (*alloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
2689 int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
2690 struct ib_flow *(*create_flow)(struct ib_qp *qp,
2691 struct ib_flow_attr *flow_attr,
2692 struct ib_udata *udata);
2693 int (*destroy_flow)(struct ib_flow *flow_id);
2694 int (*destroy_flow_action)(struct ib_flow_action *action);
2695 int (*set_vf_link_state)(struct ib_device *device, int vf, u32 port,
2696 int state);
2697 int (*get_vf_config)(struct ib_device *device, int vf, u32 port,
2698 struct ifla_vf_info *ivf);
2699 int (*get_vf_stats)(struct ib_device *device, int vf, u32 port,
2700 struct ifla_vf_stats *stats);
2701 int (*get_vf_guid)(struct ib_device *device, int vf, u32 port,
2702 struct ifla_vf_guid *node_guid,
2703 struct ifla_vf_guid *port_guid);
2704 int (*set_vf_guid)(struct ib_device *device, int vf, u32 port, u64 guid,
2705 int type);
2706 struct ib_wq *(*create_wq)(struct ib_pd *pd,
2707 struct ib_wq_init_attr *init_attr,
2708 struct ib_udata *udata);
2709 int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
2710 int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
2711 u32 wq_attr_mask, struct ib_udata *udata);
2712 int (*create_rwq_ind_table)(struct ib_rwq_ind_table *ib_rwq_ind_table,
2713 struct ib_rwq_ind_table_init_attr *init_attr,
2714 struct ib_udata *udata);
2715 int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
2716 struct ib_dm *(*alloc_dm)(struct ib_device *device,
2717 struct ib_ucontext *context,
2718 struct ib_dm_alloc_attr *attr,
2719 struct uverbs_attr_bundle *attrs);
2720 int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
2721 int (*alloc_dmah)(struct ib_dmah *ibdmah,
2722 struct uverbs_attr_bundle *attrs);
2723 int (*dealloc_dmah)(struct ib_dmah *dmah, struct uverbs_attr_bundle *attrs);
2724 struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
2725 struct ib_dm_mr_attr *attr,
2726 struct uverbs_attr_bundle *attrs);
2727 int (*create_counters)(struct ib_counters *counters,
2728 struct uverbs_attr_bundle *attrs);
2729 int (*destroy_counters)(struct ib_counters *counters);
2730 int (*read_counters)(struct ib_counters *counters,
2731 struct ib_counters_read_attr *counters_read_attr,
2732 struct uverbs_attr_bundle *attrs);
2733 int (*map_mr_sg_pi)(struct ib_mr *mr, struct scatterlist *data_sg,
2734 int data_sg_nents, unsigned int *data_sg_offset,
2735 struct scatterlist *meta_sg, int meta_sg_nents,
2736 unsigned int *meta_sg_offset);
2737
2738 /*
2739 * alloc_hw_[device,port]_stats - Allocate a struct rdma_hw_stats and
2740 * fill in the driver initialized data. The struct is kfree()'ed by
2741 * the sysfs core when the device is removed. A lifespan of -1 in the
2742 * return struct tells the core to set a default lifespan.
2743 */
2744 struct rdma_hw_stats *(*alloc_hw_device_stats)(struct ib_device *device);
2745 struct rdma_hw_stats *(*alloc_hw_port_stats)(struct ib_device *device,
2746 u32 port_num);
2747 /*
2748 * get_hw_stats - Fill in the counter value(s) in the stats struct.
2749 * @index - The index in the value array we wish to have updated, or
2750 * num_counters if we want all stats updated
2751 * Return codes -
2752 * < 0 - Error, no counters updated
2753 * index - Updated the single counter pointed to by index
2754 * num_counters - Updated all counters (will reset the timestamp
2755 * and prevent further calls for lifespan milliseconds)
2756 * Drivers are allowed to update all counters in lieu of just the
2757 * one given in index, at their option.
2758 */
2759 int (*get_hw_stats)(struct ib_device *device,
2760 struct rdma_hw_stats *stats, u32 port, int index);
2761
2762 /*
2763 * modify_hw_stat - Modify the counter configuration
2764 * @enable: true/false when enable/disable a counter
2765 * Return codes - 0 on success or error code otherwise.
2766 */
2767 int (*modify_hw_stat)(struct ib_device *device, u32 port,
2768 unsigned int counter_index, bool enable);
2769 /*
2770 * Allows rdma drivers to add their own restrack attributes.
2771 */
2772 int (*fill_res_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
2773 int (*fill_res_mr_entry_raw)(struct sk_buff *msg, struct ib_mr *ibmr);
2774 int (*fill_res_cq_entry)(struct sk_buff *msg, struct ib_cq *ibcq);
2775 int (*fill_res_cq_entry_raw)(struct sk_buff *msg, struct ib_cq *ibcq);
2776 int (*fill_res_qp_entry)(struct sk_buff *msg, struct ib_qp *ibqp);
2777 int (*fill_res_qp_entry_raw)(struct sk_buff *msg, struct ib_qp *ibqp);
2778 int (*fill_res_cm_id_entry)(struct sk_buff *msg, struct rdma_cm_id *id);
2779 int (*fill_res_srq_entry)(struct sk_buff *msg, struct ib_srq *ib_srq);
2780 int (*fill_res_srq_entry_raw)(struct sk_buff *msg, struct ib_srq *ib_srq);
2781
2782 /* Device lifecycle callbacks */
2783 /*
2784 * Called after the device becomes registered, before clients are
2785 * attached
2786 */
2787 int (*enable_driver)(struct ib_device *dev);
2788 /*
2789 * This is called as part of ib_dealloc_device().
2790 */
2791 void (*dealloc_driver)(struct ib_device *dev);
2792
2793 /* iWarp CM callbacks */
2794 void (*iw_add_ref)(struct ib_qp *qp);
2795 void (*iw_rem_ref)(struct ib_qp *qp);
2796 struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn);
2797 int (*iw_connect)(struct iw_cm_id *cm_id,
2798 struct iw_cm_conn_param *conn_param);
2799 int (*iw_accept)(struct iw_cm_id *cm_id,
2800 struct iw_cm_conn_param *conn_param);
2801 int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata,
2802 u8 pdata_len);
2803 int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
2804 int (*iw_destroy_listen)(struct iw_cm_id *cm_id);
2805 /*
2806 * counter_bind_qp - Bind a QP to a counter.
2807 * @counter - The counter to be bound. If counter->id is zero then
2808 * the driver needs to allocate a new counter and set counter->id
2809 */
2810 int (*counter_bind_qp)(struct rdma_counter *counter, struct ib_qp *qp,
2811 u32 port);
2812 /*
2813 * counter_unbind_qp - Unbind the qp from the dynamically-allocated
2814 * counter and bind it to the default one.
2815 */
2816 int (*counter_unbind_qp)(struct ib_qp *qp, u32 port);
2817 /*
2818 * counter_dealloc - Deallocate the hw counter
2819 */
2820 int (*counter_dealloc)(struct rdma_counter *counter);
2821 /*
2822 * counter_alloc_stats - Allocate a struct rdma_hw_stats and fill in
2823 * the driver initialized data.
2824 */
2825 struct rdma_hw_stats *(*counter_alloc_stats)(
2826 struct rdma_counter *counter);
2827 /*
2828 * counter_update_stats - Query the stats value of this counter
2829 */
2830 int (*counter_update_stats)(struct rdma_counter *counter);
2831
2832 /*
2833 * counter_init - Initialize the driver specific rdma counter struct.
2834 */
2835 void (*counter_init)(struct rdma_counter *counter);
2836
2837 /*
2838 * Allows rdma drivers to add their own restrack attributes
2839 * dumped via 'rdma stat' iproute2 command.
2840 */
2841 int (*fill_stat_mr_entry)(struct sk_buff *msg, struct ib_mr *ibmr);
2842
2843 /* query driver for its ucontext properties */
2844 int (*query_ucontext)(struct ib_ucontext *context,
2845 struct uverbs_attr_bundle *attrs);
2846
2847 /*
2848 * Provide NUMA node. This API exists for rdmavt/hfi1 only.
2849 * Everyone else relies on the Linux memory management model.
2850 */
2851 int (*get_numa_node)(struct ib_device *dev);
2852
2853 /*
2854 * add_sub_dev - Add a sub IB device
2855 */
2856 struct ib_device *(*add_sub_dev)(struct ib_device *parent,
2857 enum rdma_nl_dev_type type,
2858 const char *name);
2859
2860 /*
2861 * del_sub_dev - Delete a sub IB device
2862 */
2863 void (*del_sub_dev)(struct ib_device *sub_dev);
2864
2865 /*
2866 * ufile_hw_cleanup - Attempt to clean up uobject HW resources inside
2867 * the ufile.
2868 */
2869 void (*ufile_hw_cleanup)(struct ib_uverbs_file *ufile);
2870
2871 /*
2872 * report_port_event - Drivers need to implement this if they have
2873 * driver-private handling to perform when the link status changes.
2874 */
2875 void (*report_port_event)(struct ib_device *ibdev,
2876 struct net_device *ndev, unsigned long event);
2877
2878 DECLARE_RDMA_OBJ_SIZE(ib_ah);
2879 DECLARE_RDMA_OBJ_SIZE(ib_counters);
2880 DECLARE_RDMA_OBJ_SIZE(ib_cq);
2881 DECLARE_RDMA_OBJ_SIZE(ib_dmah);
2882 DECLARE_RDMA_OBJ_SIZE(ib_mw);
2883 DECLARE_RDMA_OBJ_SIZE(ib_pd);
2884 DECLARE_RDMA_OBJ_SIZE(ib_qp);
2885 DECLARE_RDMA_OBJ_SIZE(ib_rwq_ind_table);
2886 DECLARE_RDMA_OBJ_SIZE(ib_srq);
2887 DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
2888 DECLARE_RDMA_OBJ_SIZE(ib_xrcd);
2889 DECLARE_RDMA_OBJ_SIZE(rdma_counter);
2890};
2891
2892struct ib_core_device {
2893 /* device must be the first element in this structure for as long as the
2894 * union of ib_core_device and device exists in ib_device.
2895 */
2896 struct device dev;
2897 possible_net_t rdma_net;
2898 struct kobject *ports_kobj;
2899 struct list_head port_list;
2900 struct ib_device *owner; /* reach back to owner ib_device */
2901};
2902
2903struct rdma_restrack_root;
2904struct ib_device {
2905 /* Do not access @dma_device directly from ULP nor from HW drivers. */
2906 struct device *dma_device;
2907 struct ib_device_ops ops;
2908 char name[IB_DEVICE_NAME_MAX];
2909 struct rcu_head rcu_head;
2910
2911 struct list_head event_handler_list;
2912 /* Protects event_handler_list */
2913 struct rw_semaphore event_handler_rwsem;
2914
2915 /* Protects QP's event_handler calls and open_qp list */
2916 spinlock_t qp_open_list_lock;
2917
2918 struct rw_semaphore client_data_rwsem;
2919 struct xarray client_data;
2920 struct mutex unregistration_lock;
2921
2922 /* Synchronize GID, Pkey cache entries, subnet prefix, LMC */
2923 rwlock_t cache_lock;
2924 /**
2925 * port_data is indexed by port number
2926 */
2927 struct ib_port_data *port_data;
2928
2929 int num_comp_vectors;
2930
2931 union {
2932 struct device dev;
2933 struct ib_core_device coredev;
2934 };
2935
2936 /* First group is for device attributes,
2937 * second group is for driver-provided attributes (optional),
2938 * third group is for the hw_stats.
2939 * It is a NULL-terminated array.
2940 */
2941 const struct attribute_group *groups[4];
2942 u8 hw_stats_attr_index;
2943
2944 u64 uverbs_cmd_mask;
2945
2946 char node_desc[IB_DEVICE_NODE_DESC_MAX];
2947 __be64 node_guid;
2948 u32 local_dma_lkey;
2949 u16 is_switch:1;
2950 /* Indicates kernel verbs support, should not be used in drivers */
2951 u16 kverbs_provider:1;
2952 /* CQ adaptive moderation (RDMA DIM) */
2953 u16 use_cq_dim:1;
2954 u8 node_type;
2955 u32 phys_port_cnt;
2956 struct ib_device_attr attrs;
2957 struct hw_stats_device_data *hw_stats_data;
2958
2959#ifdef CONFIG_CGROUP_RDMA
2960 struct rdmacg_device cg_device;
2961#endif
2962
2963 u32 index;
2964
2965 spinlock_t cq_pools_lock;
2966 struct list_head cq_pools[IB_POLL_LAST_POOL_TYPE + 1];
2967
2968 struct rdma_restrack_root *res;
2969
2970 const struct uapi_definition *driver_def;
2971
2972 /*
2973 * Positive refcount indicates that the device is currently
2974 * registered and cannot be unregistered.
2975 */
2976 refcount_t refcount;
2977 struct completion unreg_completion;
2978 struct work_struct unregistration_work;
2979
2980 const struct rdma_link_ops *link_ops;
2981
2982 /* Protects compat_devs xarray modifications */
2983 struct mutex compat_devs_mutex;
2984 /* Maintains compat devices for each net namespace */
2985 struct xarray compat_devs;
2986
2987 /* Used by iWarp CM */
2988 char iw_ifname[IFNAMSIZ];
2989 u32 iw_driver_flags;
2990 u32 lag_flags;
2991
2992 /* A parent device has a list of sub-devices */
2993 struct mutex subdev_lock;
2994 struct list_head subdev_list_head;
2995
2996 /* A sub device has a type and a parent */
2997 enum rdma_nl_dev_type type;
2998 struct ib_device *parent;
2999 struct list_head subdev_list;
3000
3001 enum rdma_nl_name_assign_type name_assign_type;
3002
3003 struct ib_frmr_pools *frmr_pools;
3004};
3005
3006static inline void *rdma_zalloc_obj(struct ib_device *dev, size_t size,
3007 gfp_t gfp, bool is_numa_aware)
3008{
3009 if (is_numa_aware && dev->ops.get_numa_node)
3010 return kzalloc_node(size, gfp, dev->ops.get_numa_node(dev));
3011
3012 return kzalloc(size, gfp);
3013}
3014
3015struct ib_client_nl_info;
3016struct ib_client {
3017 const char *name;
3018 int (*add)(struct ib_device *ibdev);
3019 void (*remove)(struct ib_device *, void *client_data);
3020 void (*rename)(struct ib_device *dev, void *client_data);
3021 int (*get_nl_info)(struct ib_device *ibdev, void *client_data,
3022 struct ib_client_nl_info *res);
3023 int (*get_global_nl_info)(struct ib_client_nl_info *res);
3024
3025 /* Returns the net_dev belonging to this ib_client and matching the
3026 * given parameters.
3027 * @dev: An RDMA device that the net_dev uses for communication.
3028 * @port: A physical port number on the RDMA device.
3029 * @pkey: P_Key that the net_dev uses if applicable.
3030 * @gid: A GID that the net_dev uses to communicate.
3031 * @addr: An IP address the net_dev is configured with.
3032 * @client_data: The device's client data set by ib_set_client_data().
3033 *
3034 * An ib_client that implements a net_dev on top of RDMA devices
3035 * (such as IP over IB) should implement this callback, allowing the
3036 * rdma_cm module to find the right net_dev for a given request.
3037 *
3038 * The caller is responsible for calling dev_put on the returned
3039 * netdev. */
3040 struct net_device *(*get_net_dev_by_params)(
3041 struct ib_device *dev,
3042 u32 port,
3043 u16 pkey,
3044 const union ib_gid *gid,
3045 const struct sockaddr *addr,
3046 void *client_data);
3047
3048 refcount_t uses;
3049 struct completion uses_zero;
3050 u32 client_id;
3051
3052 /* kverbs are not required by the client */
3053 u8 no_kverbs_req:1;
3054};
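/*
 * Illustrative sketch of a minimal kernel client; the names my_client,
 * my_add_one, my_remove_one and struct my_state are hypothetical. add() is
 * invoked for every existing and future ib_device; per-device state is
 * attached with ib_set_client_data() and handed back in remove().
 *
 *	static struct ib_client my_client;
 *
 *	static int my_add_one(struct ib_device *ibdev)
 *	{
 *		struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);
 *
 *		if (!st)
 *			return -ENOMEM;
 *		ib_set_client_data(ibdev, &my_client, st);
 *		return 0;
 *	}
 *
 *	static void my_remove_one(struct ib_device *ibdev, void *client_data)
 *	{
 *		kfree(client_data);
 *	}
 *
 *	static struct ib_client my_client = {
 *		.name   = "my_client",
 *		.add    = my_add_one,
 *		.remove = my_remove_one,
 *	};
 *
 *	// module init/exit:
 *	//	ib_register_client(&my_client);
 *	//	ib_unregister_client(&my_client);
 */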
3055
3056struct ib_device *_ib_alloc_device(size_t size, struct net *net);
3057#define ib_alloc_device(drv_struct, member) \
3058 container_of(_ib_alloc_device(sizeof(struct drv_struct) + \
3059 BUILD_BUG_ON_ZERO(offsetof( \
3060 struct drv_struct, member)), \
3061 &init_net), \
3062 struct drv_struct, member)
3063
3064#define ib_alloc_device_with_net(drv_struct, member, net) \
3065 container_of(_ib_alloc_device(sizeof(struct drv_struct) + \
3066 BUILD_BUG_ON_ZERO(offsetof( \
3067 struct drv_struct, member)), net), \
3068 struct drv_struct, member)
3069
3070void ib_dealloc_device(struct ib_device *device);
3071
3072void ib_get_device_fw_str(struct ib_device *device, char *str);
3073
3074int ib_register_device(struct ib_device *device, const char *name,
3075 struct device *dma_device);
3076void ib_unregister_device(struct ib_device *device);
3077void ib_unregister_driver(enum rdma_driver_id driver_id);
3078void ib_unregister_device_and_put(struct ib_device *device);
3079void ib_unregister_device_queued(struct ib_device *ib_dev);
3080
3081int ib_register_client(struct ib_client *client);
3082void ib_unregister_client(struct ib_client *client);
3083
3084/**
3085 * ib_get_client_data - Get IB client context
3086 * @device:Device to get context for
3087 * @client:Client to get context for
3088 *
3089 * ib_get_client_data() returns the client context data set with
3090 * ib_set_client_data(). This can only be called while the client is
3091 * registered to the device, once the ib_client remove() callback returns this
3092 * cannot be called.
3093 */
3094static inline void *ib_get_client_data(struct ib_device *device,
3095 struct ib_client *client)
3096{
3097 return xa_load(&device->client_data, client->client_id);
3098}
3099void ib_set_client_data(struct ib_device *device, struct ib_client *client,
3100 void *data);
3101void ib_set_device_ops(struct ib_device *device,
3102 const struct ib_device_ops *ops);
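/*
 * Illustrative sketch of the provider side; the names my_dev, my_ops,
 * my_alloc_pd, my_dealloc_pd and my_pd are hypothetical, and dma_device is
 * assumed to be the parent DMA-capable device (PCI/platform). A driver
 * declares a const ops table, allocates its device with ib_alloc_device(),
 * installs the ops with ib_set_device_ops() and then registers; ops it does
 * not set remain NULL.
 *
 *	static const struct ib_device_ops my_ops = {
 *		.owner = THIS_MODULE,
 *		.driver_id = RDMA_DRIVER_UNKNOWN,
 *		.uverbs_abi_ver = 1,
 *		.alloc_pd = my_alloc_pd,
 *		.dealloc_pd = my_dealloc_pd,
 *		INIT_RDMA_OBJ_SIZE(ib_pd, my_pd, ibpd),
 *	};
 *
 *	struct my_dev *dev = ib_alloc_device(my_dev, ibdev);
 *
 *	ib_set_device_ops(&dev->ibdev, &my_ops);
 *	ret = ib_register_device(&dev->ibdev, "my%d", dma_device);
 *
 *	// teardown starts with ib_unregister_device(&dev->ibdev); the
 *	// ib_device memory is then released via ib_dealloc_device() or the
 *	// dealloc_driver op, following the usual lifecycle rules.
 */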
3103
3104int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
3105 unsigned long pfn, unsigned long size, pgprot_t prot,
3106 struct rdma_user_mmap_entry *entry);
3107int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
3108 struct rdma_user_mmap_entry *entry,
3109 size_t length);
3110int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
3111 struct rdma_user_mmap_entry *entry,
3112 size_t length, u32 min_pgoff,
3113 u32 max_pgoff);
3114
3115#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
3116void rdma_user_mmap_disassociate(struct ib_device *device);
3117#else
3118static inline void rdma_user_mmap_disassociate(struct ib_device *device)
3119{
3120}
3121#endif
3122
3123static inline int
3124rdma_user_mmap_entry_insert_exact(struct ib_ucontext *ucontext,
3125 struct rdma_user_mmap_entry *entry,
3126 size_t length, u32 pgoff)
3127{
3128 return rdma_user_mmap_entry_insert_range(ucontext, entry, length, pgoff,
3129 pgoff);
3130}
3131
3132struct rdma_user_mmap_entry *
3133rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
3134 unsigned long pgoff);
3135struct rdma_user_mmap_entry *
3136rdma_user_mmap_entry_get(struct ib_ucontext *ucontext,
3137 struct vm_area_struct *vma);
3138void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry);
3139
3140void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry);
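/*
 * Illustrative sketch of the mmap-entry flow; my_entry, resp and pfn are
 * hypothetical driver state and error handling is trimmed. The driver
 * inserts an entry, returns the resulting offset to userspace, and resolves
 * it again from its ->mmap() op.
 *
 *	// object creation path
 *	ret = rdma_user_mmap_entry_insert(ucontext, &my_entry->rdma_entry,
 *					  PAGE_SIZE);
 *	if (ret)
 *		return ret;
 *	resp.mmap_offset = rdma_user_mmap_get_offset(&my_entry->rdma_entry);
 *
 *	// ->mmap() op
 *	entry = rdma_user_mmap_entry_get(ucontext, vma);
 *	if (!entry)
 *		return -EINVAL;
 *	ret = rdma_user_mmap_io(ucontext, vma, pfn, PAGE_SIZE,
 *				pgprot_noncached(vma->vm_page_prot), entry);
 *	rdma_user_mmap_entry_put(entry);
 *	return ret;
 */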
3141
3142static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
3143{
3144 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
3145}
3146
3147static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
3148{
3149 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
3150}
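/*
 * Illustrative sketch: a verb implementation typically copies its request
 * in and its response out with the helpers above. The request/response
 * structures are hypothetical; min_t() keeps the copy within both the
 * kernel's and userspace's idea of the struct size.
 *
 *	struct my_create_req req = {};
 *	struct my_create_resp resp = {};
 *
 *	ret = ib_copy_from_udata(&req, udata,
 *				 min_t(size_t, sizeof(req), udata->inlen));
 *	if (ret)
 *		return ret;
 *	...
 *	ret = ib_copy_to_udata(udata, &resp,
 *			       min_t(size_t, sizeof(resp), udata->outlen));
 */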
3151
3152static inline bool ib_is_buffer_cleared(const void __user *p,
3153 size_t len)
3154{
3155 bool ret;
3156 u8 *buf;
3157
3158 if (len > USHRT_MAX)
3159 return false;
3160
3161 buf = memdup_user(p, len);
3162 if (IS_ERR(buf))
3163 return false;
3164
3165 ret = !memchr_inv(buf, 0, len);
3166 kfree(buf);
3167 return ret;
3168}
3169
3170static inline bool ib_is_udata_cleared(struct ib_udata *udata,
3171 size_t offset,
3172 size_t len)
3173{
3174 return ib_is_buffer_cleared(udata->inbuf + offset, len);
3175}
3176
3177/**
3178 * ib_modify_qp_is_ok - Check that the supplied attribute mask
3179 * contains all required attributes and no attributes not allowed for
3180 * the given QP state transition.
3181 * @cur_state: Current QP state
3182 * @next_state: Next QP state
3183 * @type: QP type
3184 * @mask: Mask of supplied QP attributes
3185 *
3186 * This function is a helper function that a low-level driver's
3187 * modify_qp method can use to validate the consumer's input. It
3188 * checks that cur_state and next_state are valid QP states, that a
3189 * transition from cur_state to next_state is allowed by the IB spec,
3190 * and that the attribute mask supplied is allowed for the transition.
3191 */
3192bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
3193 enum ib_qp_type type, enum ib_qp_attr_mask mask);
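/*
 * Illustrative sketch of the intended call site inside a driver's
 * modify_qp; qp->state is hypothetical driver-tracked state.
 *
 *	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state :
 *						   qp->state;
 *	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 *
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
 *				attr_mask))
 *		return -EINVAL;
 */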
3194
3195void ib_register_event_handler(struct ib_event_handler *event_handler);
3196void ib_unregister_event_handler(struct ib_event_handler *event_handler);
3197void ib_dispatch_event(const struct ib_event *event);
3198
3199int ib_query_port(struct ib_device *device,
3200 u32 port_num, struct ib_port_attr *port_attr);
3201
3202enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
3203 u32 port_num);
3204
3205/**
3206 * rdma_cap_ib_switch - Check if the device is IB switch
3207 * @device: Device to check
3208 *
3209 * The device driver is responsible for setting the is_switch bit
3210 * in the ib_device structure at init time.
3211 *
3212 * Return: true if the device is IB switch.
3213 */
3214static inline bool rdma_cap_ib_switch(const struct ib_device *device)
3215{
3216 return device->is_switch;
3217}
3218
3219/**
3220 * rdma_start_port - Return the first valid port number for the device
3221 * specified
3222 *
3223 * @device: Device to be checked
3224 *
3225 * Return start port number
3226 */
3227static inline u32 rdma_start_port(const struct ib_device *device)
3228{
3229 return rdma_cap_ib_switch(device) ? 0 : 1;
3230}
3231
3232/**
3233 * rdma_for_each_port - Iterate over all valid port numbers of the IB device
3234 * @device: The struct ib_device * to iterate over
3235 * @iter: The unsigned int to store the port number
3236 */
3237#define rdma_for_each_port(device, iter) \
3238 for (iter = rdma_start_port(device + \
3239 BUILD_BUG_ON_ZERO(!__same_type(u32, \
3240 iter))); \
3241 iter <= rdma_end_port(device); iter++)
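/*
 * Illustrative sketch: walking every valid port of a device. The iterator
 * must be a u32, which the BUILD_BUG_ON_ZERO() above enforces;
 * rdma_protocol_roce() is defined further down in this header.
 *
 *	u32 port;
 *
 *	rdma_for_each_port(ibdev, port) {
 *		if (rdma_protocol_roce(ibdev, port))
 *			pr_info("%s: port %u runs RoCE\n", ibdev->name, port);
 *	}
 */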
3242
3243/**
3244 * rdma_end_port - Return the last valid port number for the device
3245 * specified
3246 *
3247 * @device: Device to be checked
3248 *
3249 * Return last port number
3250 */
3251static inline u32 rdma_end_port(const struct ib_device *device)
3252{
3253 return rdma_cap_ib_switch(device) ? 0 : device->phys_port_cnt;
3254}
3255
3256static inline int rdma_is_port_valid(const struct ib_device *device,
3257 unsigned int port)
3258{
3259 return (port >= rdma_start_port(device) &&
3260 port <= rdma_end_port(device));
3261}
3262
3263static inline bool rdma_is_grh_required(const struct ib_device *device,
3264 u32 port_num)
3265{
3266 return device->port_data[port_num].immutable.core_cap_flags &
3267 RDMA_CORE_PORT_IB_GRH_REQUIRED;
3268}
3269
3270static inline bool rdma_protocol_ib(const struct ib_device *device,
3271 u32 port_num)
3272{
3273 return device->port_data[port_num].immutable.core_cap_flags &
3274 RDMA_CORE_CAP_PROT_IB;
3275}
3276
3277static inline bool rdma_protocol_roce(const struct ib_device *device,
3278 u32 port_num)
3279{
3280 return device->port_data[port_num].immutable.core_cap_flags &
3281 (RDMA_CORE_CAP_PROT_ROCE | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP);
3282}
3283
3284static inline bool rdma_protocol_roce_udp_encap(const struct ib_device *device,
3285 u32 port_num)
3286{
3287 return device->port_data[port_num].immutable.core_cap_flags &
3288 RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
3289}
3290
3291static inline bool rdma_protocol_roce_eth_encap(const struct ib_device *device,
3292 u32 port_num)
3293{
3294 return device->port_data[port_num].immutable.core_cap_flags &
3295 RDMA_CORE_CAP_PROT_ROCE;
3296}
3297
3298static inline bool rdma_protocol_iwarp(const struct ib_device *device,
3299 u32 port_num)
3300{
3301 return device->port_data[port_num].immutable.core_cap_flags &
3302 RDMA_CORE_CAP_PROT_IWARP;
3303}
3304
3305static inline bool rdma_ib_or_roce(const struct ib_device *device,
3306 u32 port_num)
3307{
3308 return rdma_protocol_ib(device, port_num) ||
3309 rdma_protocol_roce(device, port_num);
3310}
3311
3312static inline bool rdma_protocol_raw_packet(const struct ib_device *device,
3313 u32 port_num)
3314{
3315 return device->port_data[port_num].immutable.core_cap_flags &
3316 RDMA_CORE_CAP_PROT_RAW_PACKET;
3317}
3318
3319static inline bool rdma_protocol_usnic(const struct ib_device *device,
3320 u32 port_num)
3321{
3322 return device->port_data[port_num].immutable.core_cap_flags &
3323 RDMA_CORE_CAP_PROT_USNIC;
3324}
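/*
 * Illustrative sketch: ULPs that need per-transport behaviour usually
 * branch on the helpers above once at setup time, e.g.:
 *
 *	if (rdma_protocol_ib(ibdev, port))
 *		...	// native IB: SA path queries, pkeys, LIDs
 *	else if (rdma_protocol_roce(ibdev, port))
 *		...	// RoCE: GIDs derived from IP addresses, GRH required
 *	else if (rdma_protocol_iwarp(ibdev, port))
 *		...	// iWARP: connections set up through the iw_cm ops
 */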
3325
3326/**
3327 * rdma_cap_ib_mad - Check if the port of a device supports InfiniBand
3328 * Management Datagrams.
3329 * @device: Device to check
3330 * @port_num: Port number to check
3331 *
3332 * Management Datagrams (MAD) are a required part of the InfiniBand
3333 * specification and are supported on all InfiniBand devices. A slightly
3334 * extended version is also supported on OPA interfaces.
3335 *
3336 * Return: true if the port supports sending/receiving of MAD packets.
3337 */
3338static inline bool rdma_cap_ib_mad(const struct ib_device *device, u32 port_num)
3339{
3340 return device->port_data[port_num].immutable.core_cap_flags &
3341 RDMA_CORE_CAP_IB_MAD;
3342}
3343
3344/**
3345 * rdma_cap_opa_mad - Check if the port of device provides support for OPA
3346 * Management Datagrams.
3347 * @device: Device to check
3348 * @port_num: Port number to check
3349 *
3350 * Intel OmniPath devices extend and/or replace the InfiniBand Management
3351 * datagrams with their own versions. These OPA MADs share many but not all of
3352 * the characteristics of InfiniBand MADs.
3353 *
3354 * OPA MADs differ in the following ways:
3355 *
3356 * 1) MADs are variable size up to 2K
3357 * IBTA defined MADs remain fixed at 256 bytes
3358 * 2) OPA SMPs must carry valid PKeys
3359 * 3) OPA SMP packets use a different format
3360 *
3361 * Return: true if the port supports OPA MAD packet formats.
3362 */
3363static inline bool rdma_cap_opa_mad(struct ib_device *device, u32 port_num)
3364{
3365 return device->port_data[port_num].immutable.core_cap_flags &
3366 RDMA_CORE_CAP_OPA_MAD;
3367}
3368
3369/**
3370 * rdma_cap_ib_smi - Check if the port of a device provides an InfiniBand
3371 * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI).
3372 * @device: Device to check
3373 * @port_num: Port number to check
3374 *
3375 * Each InfiniBand node is required to provide a Subnet Management Agent
3376 * that the subnet manager can access. Prior to the fabric being fully
3377 * configured by the subnet manager, the SMA is accessed via a well known
3378 * interface called the Subnet Management Interface (SMI). This interface
3379 * uses directed route packets to communicate with the SM to get around the
3380 * chicken and egg problem of the SM needing to know what's on the fabric
3381 * in order to configure the fabric, and needing to configure the fabric in
3382 * order to send packets to the devices on the fabric. These directed
3383 * route packets do not need the fabric fully configured in order to reach
3384 * their destination. The SMI is the only method allowed to send
3385 * directed route packets on an InfiniBand fabric.
3386 *
3387 * Return: true if the port provides an SMI.
3388 */
3389static inline bool rdma_cap_ib_smi(const struct ib_device *device, u32 port_num)
3390{
3391 return device->port_data[port_num].immutable.core_cap_flags &
3392 RDMA_CORE_CAP_IB_SMI;
3393}
3394
3395/**
3396 * rdma_cap_ib_cm - Check if the port of device has the capability InfiniBand
3397 * Communication Manager.
3398 * @device: Device to check
3399 * @port_num: Port number to check
3400 *
3401 * The InfiniBand Communication Manager is one of many pre-defined General
3402 * Service Agents (GSA) that are accessed via the General Service
3403 * Interface (GSI). Its role is to facilitate establishment of connections
3404 * between nodes as well as other management related tasks for established
3405 * connections.
3406 *
3407 * Return: true if the port supports an IB CM (this does not guarantee that
3408 * a CM is actually running however).
3409 */
3410static inline bool rdma_cap_ib_cm(const struct ib_device *device, u32 port_num)
3411{
3412 return device->port_data[port_num].immutable.core_cap_flags &
3413 RDMA_CORE_CAP_IB_CM;
3414}
3415
3416/**
3417 * rdma_cap_iw_cm - Check if the port of device has the capability iWARP
3418 * Communication Manager.
3419 * @device: Device to check
3420 * @port_num: Port number to check
3421 *
3422 * Similar to above, but specific to iWARP connections which have a different
3423 * management protocol than InfiniBand.
3424 *
3425 * Return: true if the port supports an iWARP CM (this does not guarantee that
3426 * a CM is actually running however).
3427 */
3428static inline bool rdma_cap_iw_cm(const struct ib_device *device, u32 port_num)
3429{
3430 return device->port_data[port_num].immutable.core_cap_flags &
3431 RDMA_CORE_CAP_IW_CM;
3432}
3433
3434/**
3435 * rdma_cap_ib_sa - Check if the port of device has the capability InfiniBand
3436 * Subnet Administration.
3437 * @device: Device to check
3438 * @port_num: Port number to check
3439 *
3440 * An InfiniBand Subnet Administration (SA) service is a pre-defined General
3441 * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand
3442 * fabrics, devices should resolve routes to other hosts by contacting the
3443 * SA to query the proper route.
3444 *
3445 * Return: true if the port should act as a client to the fabric Subnet
3446 * Administration interface. This does not imply that the SA service is
3447 * running locally.
3448 */
3449static inline bool rdma_cap_ib_sa(const struct ib_device *device, u32 port_num)
3450{
3451 return device->port_data[port_num].immutable.core_cap_flags &
3452 RDMA_CORE_CAP_IB_SA;
3453}
3454
3455/**
3456 * rdma_cap_ib_mcast - Check if the port of device has the capability InfiniBand
3457 * Multicast.
3458 * @device: Device to check
3459 * @port_num: Port number to check
3460 *
3461 * InfiniBand multicast registration is more complex than normal IPv4 or
3462 * IPv6 multicast registration. Each Host Channel Adapter must register
3463 * with the Subnet Manager when it wishes to join a multicast group. It
3464 * should do so only once regardless of how many queue pairs it subscribes
3465 * to this group. And it should leave the group only after all queue pairs
3466 * attached to the group have been detached.
3467 *
3468 * Return: true if the port must undertake the additional administrative
3469 * overhead of registering/unregistering with the SM and tracking of the
3470 * total number of queue pairs attached to the multicast group.
3471 */
3472static inline bool rdma_cap_ib_mcast(const struct ib_device *device,
3473 u32 port_num)
3474{
3475 return rdma_cap_ib_sa(device, port_num);
3476}
3477
3478/**
3479 * rdma_cap_af_ib - Check if the port of device has the capability
3480 * Native InfiniBand Address.
3481 * @device: Device to check
3482 * @port_num: Port number to check
3483 *
3484 * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default
3485 * GID. RoCE uses a different mechanism, but still generates a GID via
3486 * a prescribed mechanism and port specific data.
3487 *
3488 * Return: true if the port uses a GID address to identify devices on the
3489 * network.
3490 */
3491static inline bool rdma_cap_af_ib(const struct ib_device *device, u32 port_num)
3492{
3493 return device->port_data[port_num].immutable.core_cap_flags &
3494 RDMA_CORE_CAP_AF_IB;
3495}
3496
3497/**
3498 * rdma_cap_eth_ah - Check if the port of device has the capability
3499 * Ethernet Address Handle.
3500 * @device: Device to check
3501 * @port_num: Port number to check
3502 *
3503 * RoCE is InfiniBand over Ethernet, and it uses a well defined technique
3504 * to fabricate GIDs over Ethernet/IP specific addresses native to the
3505 * port. Normally, packet headers are generated by the sending host
3506 * adapter, but when sending connectionless datagrams, we must manually
3507 * inject the proper headers for the fabric we are communicating over.
3508 *
3509 * Return: true if we are running as a RoCE port and must force the
3510 * addition of a Global Route Header built from our Ethernet Address
3511 * Handle into our header list for connectionless packets.
3512 */
3513static inline bool rdma_cap_eth_ah(const struct ib_device *device, u32 port_num)
3514{
3515 return device->port_data[port_num].immutable.core_cap_flags &
3516 RDMA_CORE_CAP_ETH_AH;
3517}
3518
3519/**
3520 * rdma_cap_opa_ah - Check if the port of device supports
3521 * OPA Address handles
3522 * @device: Device to check
3523 * @port_num: Port number to check
3524 *
3525 * Return: true if we are running on an OPA device which supports
3526 * the extended OPA addressing.
3527 */
3528static inline bool rdma_cap_opa_ah(struct ib_device *device, u32 port_num)
3529{
3530 return (device->port_data[port_num].immutable.core_cap_flags &
3531 RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
3532}
3533
3534/**
3535 * rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
3536 *
3537 * @device: Device
3538 * @port_num: Port number
3539 *
3540 * This MAD size includes the MAD headers and MAD payload. No other headers
3541 * are included.
3542 *
3543 * Return the max MAD size required by the Port. Will return 0 if the port
3544 * does not support MADs
3545 */
3546static inline size_t rdma_max_mad_size(const struct ib_device *device,
3547 u32 port_num)
3548{
3549 return device->port_data[port_num].immutable.max_mad_size;
3550}
3551
3552/**
3553 * rdma_cap_roce_gid_table - Check if the port of device uses roce_gid_table
3554 * @device: Device to check
3555 * @port_num: Port number to check
3556 *
3557 * RoCE GID table mechanism manages the various GIDs for a device.
3558 *
3559 * NOTE: if allocating the port's GID table has failed, this call will still
3560 * return true, but any RoCE GID table API will fail.
3561 *
3562 * Return: true if the port uses RoCE GID table mechanism in order to manage
3563 * its GIDs.
3564 */
3565static inline bool rdma_cap_roce_gid_table(const struct ib_device *device,
3566 u32 port_num)
3567{
3568 return rdma_protocol_roce(device, port_num) &&
3569 device->ops.add_gid && device->ops.del_gid;
3570}
3571
3572/*
3573 * Check if the device supports READ W/ INVALIDATE.
3574 */
3575static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
3576{
3577 /*
3578 * iWarp drivers must support READ W/ INVALIDATE. No other protocol
3579 * has support for it yet.
3580 */
3581 return rdma_protocol_iwarp(dev, port_num);
3582}
3583
3584/**
3585 * rdma_core_cap_opa_port - Return whether the RDMA Port is OPA or not.
3586 * @device: Device
3587 * @port_num: 1 based Port number
3588 *
3589 * Return true if the port is an Intel OPA port, false if not.
3590 */
3591static inline bool rdma_core_cap_opa_port(struct ib_device *device,
3592 u32 port_num)
3593{
3594 return (device->port_data[port_num].immutable.core_cap_flags &
3595 RDMA_CORE_PORT_INTEL_OPA) == RDMA_CORE_PORT_INTEL_OPA;
3596}
3597
3598/**
3599 * rdma_mtu_enum_to_int - Return the mtu of the port as an integer value.
3600 * @device: Device
3601 * @port: Port number
3602 * @mtu: enum value of MTU
3603 *
3604 * Return the MTU size supported by the port as an integer value. Will return
3605 * -1 if enum value of mtu is not supported.
3606 */
3607static inline int rdma_mtu_enum_to_int(struct ib_device *device, u32 port,
3608 int mtu)
3609{
3610 if (rdma_core_cap_opa_port(device, port))
3611 return opa_mtu_enum_to_int((enum opa_mtu)mtu);
3612 else
3613 return ib_mtu_enum_to_int((enum ib_mtu)mtu);
3614}
3615
3616/**
3617 * rdma_mtu_from_attr - Return the mtu of the port from the port attribute.
3618 * @device: Device
3619 * @port: Port number
3620 * @attr: port attribute
3621 *
3622 * Return the MTU size supported by the port as an integer value.
3623 */
3624static inline int rdma_mtu_from_attr(struct ib_device *device, u32 port,
3625 struct ib_port_attr *attr)
3626{
3627 if (rdma_core_cap_opa_port(device, port))
3628 return attr->phys_mtu;
3629 else
3630 return ib_mtu_enum_to_int(attr->max_mtu);
3631}
3632
3633int ib_set_vf_link_state(struct ib_device *device, int vf, u32 port,
3634 int state);
3635int ib_get_vf_config(struct ib_device *device, int vf, u32 port,
3636 struct ifla_vf_info *info);
3637int ib_get_vf_stats(struct ib_device *device, int vf, u32 port,
3638 struct ifla_vf_stats *stats);
3639int ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
3640 struct ifla_vf_guid *node_guid,
3641 struct ifla_vf_guid *port_guid);
3642int ib_set_vf_guid(struct ib_device *device, int vf, u32 port, u64 guid,
3643 int type);
3644
3645int ib_query_pkey(struct ib_device *device,
3646 u32 port_num, u16 index, u16 *pkey);
3647
3648int ib_modify_device(struct ib_device *device,
3649 int device_modify_mask,
3650 struct ib_device_modify *device_modify);
3651
3652int ib_modify_port(struct ib_device *device,
3653 u32 port_num, int port_modify_mask,
3654 struct ib_port_modify *port_modify);
3655
3656int ib_find_gid(struct ib_device *device, union ib_gid *gid,
3657 u32 *port_num, u16 *index);
3658
3659int ib_find_pkey(struct ib_device *device,
3660 u32 port_num, u16 pkey, u16 *index);
3661
3662enum ib_pd_flags {
3663 /*
3664 * Create a memory registration for all memory in the system and place
3665 * the rkey for it into pd->unsafe_global_rkey. This can be used by
3666 * ULPs to avoid the overhead of dynamic MRs.
3667 *
3668 * This flag is generally considered unsafe and must only be used in
3669 * extremely trusted environments. Every use of it will log a warning
3670 * in the kernel log.
3671 */
3672 IB_PD_UNSAFE_GLOBAL_RKEY = 0x01,
3673};
3674
3675struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
3676 const char *caller);
3677
3678/**
3679 * ib_alloc_pd - Allocates an unused protection domain.
3680 * @device: The device on which to allocate the protection domain.
3681 * @flags: protection domain flags
3682 *
3683 * A protection domain object provides an association between QPs, shared
3684 * receive queues, address handles, memory regions, and memory windows.
3685 *
3686 * Every PD has a local_dma_lkey which can be used as the lkey value for local
3687 * memory operations.
3688 */
3689#define ib_alloc_pd(device, flags) \
3690 __ib_alloc_pd((device), (flags), KBUILD_MODNAME)
3691
3692int ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);
3693
3694/**
3695 * ib_dealloc_pd - Deallocate kernel PD
3696 * @pd: The protection domain
3697 *
3698 * NOTE: for user PD use ib_dealloc_pd_user with valid udata!
3699 */
3700static inline void ib_dealloc_pd(struct ib_pd *pd)
3701{
3702 int ret = ib_dealloc_pd_user(pd, NULL);
3703
3704 WARN_ONCE(ret, "Destroy of kernel PD shouldn't fail");
3705}
3706
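/*
 * Example (illustrative sketch, not part of this header): a kernel ULP
 * allocating a PD without the unsafe global rkey and releasing it on
 * teardown. "ibdev" stands for a hypothetical registered ib_device.
 *
 *	struct ib_pd *pd = ib_alloc_pd(ibdev, 0);
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...
 *	ib_dealloc_pd(pd);
 */
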
3707enum rdma_create_ah_flags {
3708 /* In a sleepable context */
3709 RDMA_CREATE_AH_SLEEPABLE = BIT(0),
3710};
3711
3712/**
3713 * rdma_create_ah - Creates an address handle for the given address vector.
3714 * @pd: The protection domain associated with the address handle.
3715 * @ah_attr: The attributes of the address vector.
3716 * @flags: Create address handle flags (see enum rdma_create_ah_flags).
3717 *
3718 * The address handle is used to reference a local or global destination
3719 * in all UD QP post sends.
3720 */
3721struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
3722 u32 flags);
3723
3724/**
3725 * rdma_create_user_ah - Creates an address handle for the given address vector.
3726 * It resolves destination mac address for ah attribute of RoCE type.
3727 * @pd: The protection domain associated with the address handle.
3728 * @ah_attr: The attributes of the address vector.
3729 * @udata: pointer to user's input output buffer information needed by
3730 * the provider driver.
3731 *
3732 * It returns a valid address handle pointer on success and an ERR_PTR on error.
3733 * The address handle is used to reference a local or global destination
3734 * in all UD QP post sends.
3735 */
3736struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
3737 struct rdma_ah_attr *ah_attr,
3738 struct ib_udata *udata);
3739/**
3740 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from the GRH or IPv4 header
3741 * of a work completion.
3742 * @hdr: the L3 header to parse
3743 * @net_type: type of header to parse
3744 * @sgid: place to store source gid
3745 * @dgid: place to store destination gid
3746 */
3747int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
3748 enum rdma_network_type net_type,
3749 union ib_gid *sgid, union ib_gid *dgid);
3750
3751/**
3752 * ib_get_rdma_header_version - Get the header version
3753 * @hdr: the L3 header to parse
3754 */
3755int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
3756
3757/**
3758 * ib_init_ah_attr_from_wc - Initializes address handle attributes from a
3759 * work completion.
3760 * @device: Device on which the received message arrived.
3761 * @port_num: Port on which the received message arrived.
3762 * @wc: Work completion associated with the received message.
3763 * @grh: References the received global route header. This parameter is
3764 * ignored unless the work completion indicates that the GRH is valid.
3765 * @ah_attr: Returned attributes that can be used when creating an address
3766 * handle for replying to the message.
3767 * When ib_init_ah_attr_from_wc() returns success,
3768 * (a) for the IB link layer it optionally contains a reference to the SGID
3769 * attribute when a GRH is present.
3770 * (b) for RoCE link layer it contains a reference to SGID attribute.
3771 * User must invoke rdma_cleanup_ah_attr_gid_attr() to release reference to SGID
3772 * attributes which are initialized using ib_init_ah_attr_from_wc().
3773 *
3774 */
3775int ib_init_ah_attr_from_wc(struct ib_device *device, u32 port_num,
3776 const struct ib_wc *wc, const struct ib_grh *grh,
3777 struct rdma_ah_attr *ah_attr);
3778
3779/**
3780 * ib_create_ah_from_wc - Creates an address handle associated with the
3781 * sender of the specified work completion.
3782 * @pd: The protection domain associated with the address handle.
3783 * @wc: Work completion information associated with a received message.
3784 * @grh: References the received global route header. This parameter is
3785 * ignored unless the work completion indicates that the GRH is valid.
3786 * @port_num: The outbound port number to associate with the address.
3787 *
3788 * The address handle is used to reference a local or global destination
3789 * in all UD QP post sends.
3790 */
3791struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
3792 const struct ib_grh *grh, u32 port_num);
3793
3794/**
3795 * rdma_modify_ah - Modifies the address vector associated with an address
3796 * handle.
3797 * @ah: The address handle to modify.
3798 * @ah_attr: The new address vector attributes to associate with the
3799 * address handle.
3800 */
3801int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3802
3803/**
3804 * rdma_query_ah - Queries the address vector associated with an address
3805 * handle.
3806 * @ah: The address handle to query.
3807 * @ah_attr: The address vector attributes associated with the address
3808 * handle.
3809 */
3810int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
3811
3812enum rdma_destroy_ah_flags {
3813 /* In a sleepable context */
3814 RDMA_DESTROY_AH_SLEEPABLE = BIT(0),
3815};
3816
3817/**
3818 * rdma_destroy_ah_user - Destroys an address handle.
3819 * @ah: The address handle to destroy.
3820 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
3821 * @udata: Valid user data or NULL for kernel objects
3822 */
3823int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
3824
3825/**
3826 * rdma_destroy_ah - Destroys a kernel address handle.
3827 * @ah: The address handle to destroy.
3828 * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
3829 *
3830 * NOTE: for user ah use rdma_destroy_ah_user with valid udata!
3831 */
3832static inline void rdma_destroy_ah(struct ib_ah *ah, u32 flags)
3833{
3834 int ret = rdma_destroy_ah_user(ah, flags, NULL);
3835
3836 WARN_ONCE(ret, "Destroy of kernel AH shouldn't fail");
3837}
3838
3839struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
3840 struct ib_srq_init_attr *srq_init_attr,
3841 struct ib_usrq_object *uobject,
3842 struct ib_udata *udata);
3843static inline struct ib_srq *
3844ib_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *srq_init_attr)
3845{
3846 if (!pd->device->ops.create_srq)
3847 return ERR_PTR(-EOPNOTSUPP);
3848
3849 return ib_create_srq_user(pd, srq_init_attr, NULL, NULL);
3850}
3851
3852/**
3853 * ib_modify_srq - Modifies the attributes for the specified SRQ.
3854 * @srq: The SRQ to modify.
3855 * @srq_attr: On input, specifies the SRQ attributes to modify. On output,
3856 * the current values of selected SRQ attributes are returned.
3857 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
3858 * are being modified.
3859 *
3860 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
3861 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
3862 * the number of receives queued drops below the limit.
3863 */
3864int ib_modify_srq(struct ib_srq *srq,
3865 struct ib_srq_attr *srq_attr,
3866 enum ib_srq_attr_mask srq_attr_mask);
3867
3868/**
3869 * ib_query_srq - Returns the attribute list and current values for the
3870 * specified SRQ.
3871 * @srq: The SRQ to query.
3872 * @srq_attr: The attributes of the specified SRQ.
3873 */
3874int ib_query_srq(struct ib_srq *srq,
3875 struct ib_srq_attr *srq_attr);
3876
3877/**
3878 * ib_destroy_srq_user - Destroys the specified SRQ.
3879 * @srq: The SRQ to destroy.
3880 * @udata: Valid user data or NULL for kernel objects
3881 */
3882int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);
3883
3884/**
3885 * ib_destroy_srq - Destroys the specified kernel SRQ.
3886 * @srq: The SRQ to destroy.
3887 *
3888 * NOTE: for user srq use ib_destroy_srq_user with valid udata!
3889 */
3890static inline void ib_destroy_srq(struct ib_srq *srq)
3891{
3892 int ret = ib_destroy_srq_user(srq, NULL);
3893
3894 WARN_ONCE(ret, "Destroy of kernel SRQ shouldn't fail");
3895}
3896
3897/**
3898 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
3899 * @srq: The SRQ to post the work request on.
3900 * @recv_wr: A list of work requests to post on the receive queue.
3901 * @bad_recv_wr: On an immediate failure, this parameter will reference
3902 * the work request that failed to be posted on the SRQ.
3903 */
3904static inline int ib_post_srq_recv(struct ib_srq *srq,
3905 const struct ib_recv_wr *recv_wr,
3906 const struct ib_recv_wr **bad_recv_wr)
3907{
3908 const struct ib_recv_wr *dummy;
3909
3910 return srq->device->ops.post_srq_recv(srq, recv_wr,
3911 bad_recv_wr ? : &dummy);
3912}
3913
3914struct ib_qp *ib_create_qp_kernel(struct ib_pd *pd,
3915 struct ib_qp_init_attr *qp_init_attr,
3916 const char *caller);
3917/**
3918 * ib_create_qp - Creates a kernel QP associated with the specified protection
3919 * domain.
3920 * @pd: The protection domain associated with the QP.
3921 * @init_attr: A list of initial attributes required to create the
3922 * QP. If QP creation succeeds, then the attributes are updated to
3923 * the actual capabilities of the created QP.
3924 */
3925static inline struct ib_qp *ib_create_qp(struct ib_pd *pd,
3926 struct ib_qp_init_attr *init_attr)
3927{
3928 return ib_create_qp_kernel(pd, init_attr, KBUILD_MODNAME);
3929}
3930
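/*
 * Example (illustrative sketch): creating an RC QP for a kernel ULP. The
 * CQs and the capacity values are hypothetical placeholders chosen by the
 * caller.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq	  = send_cq,
 *		.recv_cq	  = recv_cq,
 *		.cap.max_send_wr  = 16,
 *		.cap.max_recv_wr  = 16,
 *		.cap.max_send_sge = 1,
 *		.cap.max_recv_sge = 1,
 *		.sq_sig_type	  = IB_SIGNAL_REQ_WR,
 *		.qp_type	  = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */
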
3931/**
3932 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
3933 * @qp: The QP to modify.
3934 * @attr: On input, specifies the QP attributes to modify. On output,
3935 * the current values of selected QP attributes are returned.
3936 * @attr_mask: A bit-mask used to specify which attributes of the QP
3937 * are being modified.
3938 * @udata: pointer to user's input output buffer information.
3940 * It returns 0 on success and returns appropriate error code on error.
3941 */
3942int ib_modify_qp_with_udata(struct ib_qp *qp,
3943 struct ib_qp_attr *attr,
3944 int attr_mask,
3945 struct ib_udata *udata);
3946
3947/**
3948 * ib_modify_qp - Modifies the attributes for the specified QP and then
3949 * transitions the QP to the given state.
3950 * @qp: The QP to modify.
3951 * @qp_attr: On input, specifies the QP attributes to modify. On output,
3952 * the current values of selected QP attributes are returned.
3953 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
3954 * are being modified.
3955 */
3956int ib_modify_qp(struct ib_qp *qp,
3957 struct ib_qp_attr *qp_attr,
3958 int qp_attr_mask);
3959
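/*
 * Example (illustrative sketch): moving a QP into the error state so that
 * all outstanding work requests complete with a flush status.
 *
 *	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
 *	int ret;
 *
 *	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
 */
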
3960/**
3961 * ib_query_qp - Returns the attribute list and current values for the
3962 * specified QP.
3963 * @qp: The QP to query.
3964 * @qp_attr: The attributes of the specified QP.
3965 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
3966 * @qp_init_attr: Additional attributes of the selected QP.
3967 *
3968 * The qp_attr_mask may be used to limit the query to gathering only the
3969 * selected attributes.
3970 */
3971int ib_query_qp(struct ib_qp *qp,
3972 struct ib_qp_attr *qp_attr,
3973 int qp_attr_mask,
3974 struct ib_qp_init_attr *qp_init_attr);
3975
3976/**
3977 * ib_destroy_qp_user - Destroys the specified QP.
3978 * @qp: The QP to destroy.
3979 * @udata: Valid udata or NULL for kernel objects
3980 */
3981int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata);
3982
3983/**
3984 * ib_destroy_qp - Destroys the specified kernel QP.
3985 * @qp: The QP to destroy.
3986 *
3987 * NOTE: for user qp use ib_destroy_qp_user with valid udata!
3988 */
3989static inline int ib_destroy_qp(struct ib_qp *qp)
3990{
3991 return ib_destroy_qp_user(qp, NULL);
3992}
3993
3994/**
3995 * ib_open_qp - Obtain a reference to an existing sharable QP.
3996 * @xrcd: XRC domain
3997 * @qp_open_attr: Attributes identifying the QP to open.
3998 *
3999 * Returns a reference to a sharable QP.
4000 */
4001struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
4002 struct ib_qp_open_attr *qp_open_attr);
4003
4004/**
4005 * ib_close_qp - Release an external reference to a QP.
4006 * @qp: The QP handle to release
4007 *
4008 * The opened QP handle is released by the caller. The underlying
4009 * shared QP is not destroyed until all internal references are released.
4010 */
4011int ib_close_qp(struct ib_qp *qp);
4012
4013/**
4014 * ib_post_send - Posts a list of work requests to the send queue of
4015 * the specified QP.
4016 * @qp: The QP to post the work request on.
4017 * @send_wr: A list of work requests to post on the send queue.
4018 * @bad_send_wr: On an immediate failure, this parameter will reference
4019 * the work request that failed to be posted on the QP.
4020 *
4021 * While IBA Vol. 1 section 11.4.1.1 specifies that if an immediate
4022 * error is returned, the QP state shall not be affected,
4023 * ib_post_send() will return an immediate error after queueing any
4024 * earlier work requests in the list.
4025 */
4026static inline int ib_post_send(struct ib_qp *qp,
4027 const struct ib_send_wr *send_wr,
4028 const struct ib_send_wr **bad_send_wr)
4029{
4030 const struct ib_send_wr *dummy;
4031
4032 return qp->device->ops.post_send(qp, send_wr, bad_send_wr ? : &dummy);
4033}
4034
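/*
 * Example (illustrative sketch): posting one signalled SEND carrying a
 * single already DMA-mapped buffer. "dma_addr" and "len" are assumed to
 * come from an earlier ib_dma_map_single() call by the caller.
 *
 *	struct ib_sge sge = {
 *		.addr	= dma_addr,
 *		.length	= len,
 *		.lkey	= pd->local_dma_lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode	    = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	const struct ib_send_wr *bad_wr;
 *	int ret;
 *
 *	ret = ib_post_send(qp, &wr, &bad_wr);
 */
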
4035/**
4036 * ib_post_recv - Posts a list of work requests to the receive queue of
4037 * the specified QP.
4038 * @qp: The QP to post the work request on.
4039 * @recv_wr: A list of work requests to post on the receive queue.
4040 * @bad_recv_wr: On an immediate failure, this parameter will reference
4041 * the work request that failed to be posted on the QP.
4042 */
4043static inline int ib_post_recv(struct ib_qp *qp,
4044 const struct ib_recv_wr *recv_wr,
4045 const struct ib_recv_wr **bad_recv_wr)
4046{
4047 const struct ib_recv_wr *dummy;
4048
4049 return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
4050}
4051
4052struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
4053 int comp_vector, enum ib_poll_context poll_ctx,
4054 const char *caller);
4055static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
4056 int nr_cqe, int comp_vector,
4057 enum ib_poll_context poll_ctx)
4058{
4059 return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
4060 KBUILD_MODNAME);
4061}
4062
4063struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
4064 int nr_cqe, enum ib_poll_context poll_ctx,
4065 const char *caller);
4066
4067/**
4068 * ib_alloc_cq_any - Allocate a kernel CQ
4069 * @dev: The IB device
4070 * @private: Private data attached to the CQE
4071 * @nr_cqe: Number of CQEs in the CQ
4072 * @poll_ctx: Context used for polling the CQ
4073 */
4074static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
4075 void *private, int nr_cqe,
4076 enum ib_poll_context poll_ctx)
4077{
4078 return __ib_alloc_cq_any(dev, private, nr_cqe, poll_ctx,
4079 KBUILD_MODNAME);
4080}
4081
4082void ib_free_cq(struct ib_cq *cq);
4083int ib_process_cq_direct(struct ib_cq *cq, int budget);
4084
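/*
 * Example (illustrative sketch): allocating a kernel CQ that is polled from
 * softirq context and freeing it again; "nr_cqe" is sized by the caller.
 *
 *	struct ib_cq *cq = ib_alloc_cq_any(ibdev, NULL, nr_cqe,
 *					   IB_POLL_SOFTIRQ);
 *
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	...
 *	ib_free_cq(cq);
 */
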
4085/**
4086 * ib_create_cq - Creates a CQ on the specified device.
4087 * @device: The device on which to create the CQ.
4088 * @comp_handler: A user-specified callback that is invoked when a
4089 * completion event occurs on the CQ.
4090 * @event_handler: A user-specified callback that is invoked when an
4091 * asynchronous event not associated with a completion occurs on the CQ.
4092 * @cq_context: Context associated with the CQ returned to the user via
4093 * the associated completion and event handlers.
4094 * @cq_attr: The attributes the CQ should be created upon.
4095 *
4096 * Users can examine the cq structure to determine the actual CQ size.
4097 */
4098struct ib_cq *__ib_create_cq(struct ib_device *device,
4099 ib_comp_handler comp_handler,
4100 void (*event_handler)(struct ib_event *, void *),
4101 void *cq_context,
4102 const struct ib_cq_init_attr *cq_attr,
4103 const char *caller);
4104#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
4105 __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
4106
4107/**
4108 * rdma_set_cq_moderation - Modifies moderation params of the CQ
4109 * @cq: The CQ to modify.
4110 * @cq_count: number of CQEs that will trigger an event
4111 * @cq_period: max period of time in usec before triggering an event
4112 *
4113 */
4114int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
4115
4116/**
4117 * ib_destroy_cq_user - Destroys the specified CQ.
4118 * @cq: The CQ to destroy.
4119 * @udata: Valid user data or NULL for kernel objects
4120 */
4121int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
4122
4123/**
4124 * ib_destroy_cq - Destroys the specified kernel CQ.
4125 * @cq: The CQ to destroy.
4126 *
4127 * NOTE: for user cq use ib_destroy_cq_user with valid udata!
4128 */
4129static inline void ib_destroy_cq(struct ib_cq *cq)
4130{
4131 int ret = ib_destroy_cq_user(cq, NULL);
4132
4133 WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
4134}
4135
4136/**
4137 * ib_poll_cq - poll a CQ for completion(s)
4138 * @cq: the CQ being polled
4139 * @num_entries: maximum number of completions to return
4140 * @wc: array of at least @num_entries &struct ib_wc where completions
4141 * will be returned
4142 *
4143 * Poll a CQ for (possibly multiple) completions. If the return value
4144 * is < 0, an error occurred. If the return value is >= 0, it is the
4145 * number of completions returned. If the return value is
4146 * non-negative and < num_entries, then the CQ was emptied.
4147 */
4148static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
4149 struct ib_wc *wc)
4150{
4151 return cq->device->ops.poll_cq(cq, num_entries, wc);
4152}
4153
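/*
 * Example (illustrative sketch): draining all currently available
 * completions from a directly polled CQ.
 *
 *	struct ib_wc wc;
 *
 *	while (ib_poll_cq(cq, 1, &wc) > 0) {
 *		if (wc.status != IB_WC_SUCCESS)
 *			pr_err("completion failed: %d\n", wc.status);
 *		...
 *	}
 */
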
4154/**
4155 * ib_req_notify_cq - Request completion notification on a CQ.
4156 * @cq: The CQ to generate an event for.
4157 * @flags:
4158 * Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
4159 * to request an event on the next solicited event or next work
4160 * completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
4161 * may also be |ed in to request a hint about missed events, as
4162 * described below.
4163 *
4164 * Return Value:
4165 * < 0 means an error occurred while requesting notification
4166 * == 0 means notification was requested successfully, and if
4167 * IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
4168 * were missed and it is safe to wait for another event. In
4169 * this case it is guaranteed that any work completions added
4170 * to the CQ since the last CQ poll will trigger a completion
4171 * notification event.
4172 * > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
4173 * in. It means that the consumer must poll the CQ again to
4174 * make sure it is empty to avoid missing an event because of a
4175 * race between requesting notification and an entry being
4176 * added to the CQ. This return value means it is possible
4177 * (but not guaranteed) that a work completion has been added
4178 * to the CQ since the last poll without triggering a
4179 * completion notification event.
4180 */
4181static inline int ib_req_notify_cq(struct ib_cq *cq,
4182 enum ib_cq_notify_flags flags)
4183{
4184 return cq->device->ops.req_notify_cq(cq, flags);
4185}
4186
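/*
 * Example (illustrative sketch): the classic re-arm pattern built on the
 * return value documented above. "handle_wc" is a hypothetical consumer
 * helper.
 *
 *	struct ib_wc wc;
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			handle_wc(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */
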
4187struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
4188 int comp_vector_hint,
4189 enum ib_poll_context poll_ctx);
4190
4191void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe);
4192
4193/*
4194 * Drivers that don't need a DMA mapping at the RDMA layer, set dma_device to
4195 * NULL. This causes the ib_dma* helpers to just stash the kernel virtual
4196 * address into the dma address.
4197 */
4198static inline bool ib_uses_virt_dma(struct ib_device *dev)
4199{
4200 return IS_ENABLED(CONFIG_INFINIBAND_VIRT_DMA) && !dev->dma_device;
4201}
4202
4203/*
4204 * Check if an IB device's underlying DMA mapping supports P2PDMA transfers.
4205 */
4206static inline bool ib_dma_pci_p2p_dma_supported(struct ib_device *dev)
4207{
4208 if (ib_uses_virt_dma(dev))
4209 return false;
4210
4211 return dma_pci_p2pdma_supported(dev->dma_device);
4212}
4213
4214/**
4215 * ib_virt_dma_to_ptr - Convert a dma_addr to a kernel pointer
4216 * @dma_addr: The DMA address
4217 *
4218 * Used by ib_uses_virt_dma() devices to get back to the kernel pointer after
4219 * going through the dma_addr marshalling.
4220 */
4221static inline void *ib_virt_dma_to_ptr(u64 dma_addr)
4222{
4223	/* virt_dma mode maps kernel virtual addresses directly into the dma addr */
4224 return (void *)(uintptr_t)dma_addr;
4225}
4226
4227/**
4228 * ib_virt_dma_to_page - Convert a dma_addr to a struct page
4229 * @dma_addr: The DMA address
4230 *
4231 * Used by ib_uses_virt_dma() devices to get back to the struct page after going
4232 * through the dma_addr marshalling.
4233 */
4234static inline struct page *ib_virt_dma_to_page(u64 dma_addr)
4235{
4236 return virt_to_page(ib_virt_dma_to_ptr(dma_addr));
4237}
4238
4239/**
4240 * ib_dma_mapping_error - check a DMA addr for error
4241 * @dev: The device for which the dma_addr was created
4242 * @dma_addr: The DMA address to check
4243 */
4244static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
4245{
4246 if (ib_uses_virt_dma(dev))
4247 return 0;
4248 return dma_mapping_error(dev->dma_device, dma_addr);
4249}
4250
4251/**
4252 * ib_dma_map_single - Map a kernel virtual address to DMA address
4253 * @dev: The device for which the dma_addr is to be created
4254 * @cpu_addr: The kernel virtual address
4255 * @size: The size of the region in bytes
4256 * @direction: The direction of the DMA
4257 */
4258static inline u64 ib_dma_map_single(struct ib_device *dev,
4259 void *cpu_addr, size_t size,
4260 enum dma_data_direction direction)
4261{
4262 if (ib_uses_virt_dma(dev))
4263 return (uintptr_t)cpu_addr;
4264 return dma_map_single(dev->dma_device, cpu_addr, size, direction);
4265}
4266
4267/**
4268 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
4269 * @dev: The device for which the DMA address was created
4270 * @addr: The DMA address
4271 * @size: The size of the region in bytes
4272 * @direction: The direction of the DMA
4273 */
4274static inline void ib_dma_unmap_single(struct ib_device *dev,
4275 u64 addr, size_t size,
4276 enum dma_data_direction direction)
4277{
4278 if (!ib_uses_virt_dma(dev))
4279 dma_unmap_single(dev->dma_device, addr, size, direction);
4280}
4281
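/*
 * Example (illustrative sketch): mapping a kernel buffer for device access,
 * checking the result, and unmapping it again. "buf" and "len" are assumed
 * to describe a kmalloc'ed buffer owned by the caller.
 *
 *	u64 dma_addr = ib_dma_map_single(ibdev, buf, len, DMA_TO_DEVICE);
 *
 *	if (ib_dma_mapping_error(ibdev, dma_addr))
 *		return -ENOMEM;
 *	...
 *	ib_dma_unmap_single(ibdev, dma_addr, len, DMA_TO_DEVICE);
 */
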
4282/**
4283 * ib_dma_map_page - Map a physical page to DMA address
4284 * @dev: The device for which the dma_addr is to be created
4285 * @page: The page to be mapped
4286 * @offset: The offset within the page
4287 * @size: The size of the region in bytes
4288 * @direction: The direction of the DMA
4289 */
4290static inline u64 ib_dma_map_page(struct ib_device *dev,
4291 struct page *page,
4292 unsigned long offset,
4293 size_t size,
4294 enum dma_data_direction direction)
4295{
4296 if (ib_uses_virt_dma(dev))
4297 return (uintptr_t)(page_address(page) + offset);
4298 return dma_map_page(dev->dma_device, page, offset, size, direction);
4299}
4300
4301/**
4302 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
4303 * @dev: The device for which the DMA address was created
4304 * @addr: The DMA address
4305 * @size: The size of the region in bytes
4306 * @direction: The direction of the DMA
4307 */
4308static inline void ib_dma_unmap_page(struct ib_device *dev,
4309 u64 addr, size_t size,
4310 enum dma_data_direction direction)
4311{
4312 if (!ib_uses_virt_dma(dev))
4313 dma_unmap_page(dev->dma_device, addr, size, direction);
4314}
4315
4316/**
4317 * ib_dma_map_bvec - Map a bio_vec to DMA address
4318 * @dev: The device for which the dma_addr is to be created
4319 * @bvec: The bio_vec to map
4320 * @direction: The direction of the DMA
4321 *
4322 * Returns a DMA address for the bio_vec. The caller must check the
4323 * result with ib_dma_mapping_error() before use; a failed mapping
4324 * must not be passed to ib_dma_unmap_bvec().
4325 *
4326 * For software RDMA devices (rxe, siw), returns a virtual address
4327 * and no actual DMA mapping occurs.
4328 */
4329static inline u64 ib_dma_map_bvec(struct ib_device *dev,
4330 struct bio_vec *bvec,
4331 enum dma_data_direction direction)
4332{
4333 if (ib_uses_virt_dma(dev))
4334 return (uintptr_t)bvec_virt(bvec);
4335 return dma_map_phys(dev->dma_device, bvec_phys(bvec),
4336 bvec->bv_len, direction, 0);
4337}
4338
4339/**
4340 * ib_dma_unmap_bvec - Unmap a bio_vec DMA mapping
4341 * @dev: The device for which the DMA address was created
4342 * @addr: The DMA address returned by ib_dma_map_bvec()
4343 * @size: The size of the region in bytes
4344 * @direction: The direction of the DMA
4345 *
4346 * Releases a DMA mapping created by ib_dma_map_bvec(). For software
4347 * RDMA devices this is a no-op since no actual mapping occurred.
4348 */
4349static inline void ib_dma_unmap_bvec(struct ib_device *dev,
4350 u64 addr, size_t size,
4351 enum dma_data_direction direction)
4352{
4353 if (!ib_uses_virt_dma(dev))
4354 dma_unmap_phys(dev->dma_device, addr, size, direction, 0);
4355}
4356
4357int ib_dma_virt_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents);
4358static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
4359 struct scatterlist *sg, int nents,
4360 enum dma_data_direction direction,
4361 unsigned long dma_attrs)
4362{
4363 if (ib_uses_virt_dma(dev))
4364 return ib_dma_virt_map_sg(dev, sg, nents);
4365 return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
4366 dma_attrs);
4367}
4368
4369static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
4370 struct scatterlist *sg, int nents,
4371 enum dma_data_direction direction,
4372 unsigned long dma_attrs)
4373{
4374 if (!ib_uses_virt_dma(dev))
4375 dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction,
4376 dma_attrs);
4377}
4378
4379/**
4380 * ib_dma_map_sgtable_attrs - Map a scatter/gather table to DMA addresses
4381 * @dev: The device for which the DMA addresses are to be created
4382 * @sgt: The sg_table object describing the buffer
4383 * @direction: The direction of the DMA
4384 * @dma_attrs: Optional DMA attributes for the map operation
4385 */
4386static inline int ib_dma_map_sgtable_attrs(struct ib_device *dev,
4387 struct sg_table *sgt,
4388 enum dma_data_direction direction,
4389 unsigned long dma_attrs)
4390{
4391 int nents;
4392
4393 if (ib_uses_virt_dma(dev)) {
4394 nents = ib_dma_virt_map_sg(dev, sgt->sgl, sgt->orig_nents);
4395 if (!nents)
4396 return -EIO;
4397 sgt->nents = nents;
4398 return 0;
4399 }
4400 return dma_map_sgtable(dev->dma_device, sgt, direction, dma_attrs);
4401}
4402
4403static inline void ib_dma_unmap_sgtable_attrs(struct ib_device *dev,
4404 struct sg_table *sgt,
4405 enum dma_data_direction direction,
4406 unsigned long dma_attrs)
4407{
4408 if (!ib_uses_virt_dma(dev))
4409 dma_unmap_sgtable(dev->dma_device, sgt, direction, dma_attrs);
4410}
4411
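/*
 * Example (illustrative sketch): mapping an already-populated sg_table and
 * unmapping it again; "sgt" is assumed to have been built with
 * sg_alloc_table() or similar by the caller.
 *
 *	int ret;
 *
 *	ret = ib_dma_map_sgtable_attrs(ibdev, &sgt, DMA_BIDIRECTIONAL, 0);
 *	if (ret)
 *		return ret;
 *	...
 *	ib_dma_unmap_sgtable_attrs(ibdev, &sgt, DMA_BIDIRECTIONAL, 0);
 */
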
4412/**
4413 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
4414 * @dev: The device for which the DMA addresses are to be created
4415 * @sg: The array of scatter/gather entries
4416 * @nents: The number of scatter/gather entries
4417 * @direction: The direction of the DMA
4418 */
4419static inline int ib_dma_map_sg(struct ib_device *dev,
4420 struct scatterlist *sg, int nents,
4421 enum dma_data_direction direction)
4422{
4423 return ib_dma_map_sg_attrs(dev, sg, nents, direction, 0);
4424}
4425
4426/**
4427 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
4428 * @dev: The device for which the DMA addresses were created
4429 * @sg: The array of scatter/gather entries
4430 * @nents: The number of scatter/gather entries
4431 * @direction: The direction of the DMA
4432 */
4433static inline void ib_dma_unmap_sg(struct ib_device *dev,
4434 struct scatterlist *sg, int nents,
4435 enum dma_data_direction direction)
4436{
4437 ib_dma_unmap_sg_attrs(dev, sg, nents, direction, 0);
4438}
4439
4440/**
4441 * ib_dma_max_seg_size - Return the size limit of a single DMA transfer
4442 * @dev: The device to query
4443 *
4444 * The returned value represents a size in bytes.
4445 */
4446static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
4447{
4448 if (ib_uses_virt_dma(dev))
4449 return UINT_MAX;
4450 return dma_get_max_seg_size(dev->dma_device);
4451}
4452
4453/**
4454 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
4455 * @dev: The device for which the DMA address was created
4456 * @addr: The DMA address
4457 * @size: The size of the region in bytes
4458 * @dir: The direction of the DMA
4459 */
4460static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
4461 u64 addr,
4462 size_t size,
4463 enum dma_data_direction dir)
4464{
4465 if (!ib_uses_virt_dma(dev))
4466 dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
4467}
4468
4469/**
4470 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
4471 * @dev: The device for which the DMA address was created
4472 * @addr: The DMA address
4473 * @size: The size of the region in bytes
4474 * @dir: The direction of the DMA
4475 */
4476static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
4477 u64 addr,
4478 size_t size,
4479 enum dma_data_direction dir)
4480{
4481 if (!ib_uses_virt_dma(dev))
4482 dma_sync_single_for_device(dev->dma_device, addr, size, dir);
4483}
4484
4485/* ib_reg_user_mr - register a memory region for virtual addresses from kernel
4486 * space. This function should be called when 'current' is the owning MM.
4487 */
4488struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
4489 u64 virt_addr, int mr_access_flags);
4490
4491/* ib_advise_mr - give an advice about an address range in a memory region */
4492int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
4493 u32 flags, struct ib_sge *sg_list, u32 num_sge);
4494/**
4495 * ib_dereg_mr_user - Deregisters a memory region and removes it from the
4496 * HCA translation table.
4497 * @mr: The memory region to deregister.
4498 * @udata: Valid user data or NULL for kernel object
4499 *
4500 * This function can fail if the memory region has memory windows bound to it.
4501 */
4502int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata);
4503
4504/**
4505 * ib_dereg_mr - Deregisters a kernel memory region and removes it from the
4506 * HCA translation table.
4507 * @mr: The memory region to deregister.
4508 *
4509 * This function can fail if the memory region has memory windows bound to it.
4510 *
4511 * NOTE: for user mr use ib_dereg_mr_user with valid udata!
4512 */
4513static inline int ib_dereg_mr(struct ib_mr *mr)
4514{
4515 return ib_dereg_mr_user(mr, NULL);
4516}
4517
4518struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
4519 u32 max_num_sg);
4520
4521struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
4522 u32 max_num_data_sg,
4523 u32 max_num_meta_sg);
4524
4525/**
4526 * ib_update_fast_reg_key - updates the key portion of the fast_reg MR
4527 * R_Key and L_Key.
4528 * @mr: struct ib_mr pointer to be updated.
4529 * @newkey: new key to be used.
4530 */
4531static inline void ib_update_fast_reg_key(struct ib_mr *mr, u8 newkey)
4532{
4533 mr->lkey = (mr->lkey & 0xffffff00) | newkey;
4534 mr->rkey = (mr->rkey & 0xffffff00) | newkey;
4535}
4536
4537/**
4538 * ib_inc_rkey - increments the key portion of the given rkey. Can be used
4539 * for calculating a new rkey for type 2 memory windows.
4540 * @rkey: the rkey to increment.
4541 */
4542static inline u32 ib_inc_rkey(u32 rkey)
4543{
4544 const u32 mask = 0x000000ff;
4545 return ((rkey + 1) & mask) | (rkey & ~mask);
4546}
4547
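/*
 * Example (illustrative sketch): refreshing the key portion of a fast
 * registration MR before it is registered again, a pattern used by some
 * in-kernel ULPs. Only the low 8 bits of the incremented rkey are consumed.
 *
 *	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
 */
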
4548/**
4549 * ib_attach_mcast - Attaches the specified QP to a multicast group.
4550 * @qp: QP to attach to the multicast group. The QP must be type
4551 * IB_QPT_UD.
4552 * @gid: Multicast group GID.
4553 * @lid: Multicast group LID in host byte order.
4554 *
4555 * In order to send and receive multicast packets, subnet
4556 * administration must have created the multicast group and configured
4557 * the fabric appropriately. The port associated with the specified
4558 * QP must also be a member of the multicast group.
4559 */
4560int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4561
4562/**
4563 * ib_detach_mcast - Detaches the specified QP from a multicast group.
4564 * @qp: QP to detach from the multicast group.
4565 * @gid: Multicast group GID.
4566 * @lid: Multicast group LID in host byte order.
4567 */
4568int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
4569
4570struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
4571 struct inode *inode, struct ib_udata *udata);
4572int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata);
4573
4574static inline int ib_check_mr_access(struct ib_device *ib_dev,
4575 unsigned int flags)
4576{
4577 u64 device_cap = ib_dev->attrs.device_cap_flags;
4578
4579 /*
4580 * Local write permission is required if remote write or
4581 * remote atomic permission is also requested.
4582 */
4583 if (flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
4584 !(flags & IB_ACCESS_LOCAL_WRITE))
4585 return -EINVAL;
4586
4587 if (flags & ~IB_ACCESS_SUPPORTED)
4588 return -EINVAL;
4589
4590 if (flags & IB_ACCESS_ON_DEMAND &&
4591 !(ib_dev->attrs.kernel_cap_flags & IBK_ON_DEMAND_PAGING))
4592 return -EOPNOTSUPP;
4593
4594 if ((flags & IB_ACCESS_FLUSH_GLOBAL &&
4595 !(device_cap & IB_DEVICE_FLUSH_GLOBAL)) ||
4596 (flags & IB_ACCESS_FLUSH_PERSISTENT &&
4597 !(device_cap & IB_DEVICE_FLUSH_PERSISTENT)))
4598 return -EOPNOTSUPP;
4599
4600 return 0;
4601}
4602
4603static inline bool ib_access_writable(int access_flags)
4604{
4605 /*
4606 * We have writable memory backing the MR if any of the following
4607 * access flags are set. "Local write" and "remote write" obviously
4608 * require write access. "Remote atomic" can do things like fetch and
4609 * add, which will modify memory, and "MW bind" can change permissions
4610 * by binding a window.
4611 */
4612 return access_flags &
4613 (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
4614 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
4615}
4616
4617/**
4618 * ib_check_mr_status: lightweight check of MR status.
4619 * This routine may provide status checks on a selected
4620 * ib_mr. The first use is for signature status checks.
4621 *
4622 * @mr: A memory region.
4623 * @check_mask: Bitmask of which checks to perform from
4624 * ib_mr_status_check enumeration.
4625 * @mr_status: The container of relevant status checks.
4626 * Failed checks will be indicated in the status bitmask
4627 * and the relevant info shall be in the error item.
4628 */
4629int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
4630 struct ib_mr_status *mr_status);
4631
4632/**
4633 * ib_device_try_get: Hold a registration lock
4634 * @dev: The device to lock
4635 *
4636 * A device under an active registration lock cannot become unregistered. It
4637 * is only possible to obtain a registration lock on a device that is fully
4638 * registered, otherwise this function returns false.
4639 *
4640 * The registration lock is only necessary for actions which require the
4641 * device to still be registered. Uses that only require the device pointer to
4642 * be valid should use get_device(&ibdev->dev) to hold the memory.
4643 *
4644 */
4645static inline bool ib_device_try_get(struct ib_device *dev)
4646{
4647 return refcount_inc_not_zero(&dev->refcount);
4648}
4649
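/*
 * Example (illustrative sketch): holding the registration lock around an
 * operation that requires the device to remain registered.
 *
 *	if (!ib_device_try_get(ibdev))
 *		return -ENODEV;
 *	...
 *	ib_device_put(ibdev);
 */
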
4650void ib_device_put(struct ib_device *device);
4651struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
4652 enum rdma_driver_id driver_id);
4653struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u32 port,
4654 u16 pkey, const union ib_gid *gid,
4655 const struct sockaddr *addr);
4656int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
4657 unsigned int port);
4658struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
4659 u32 port);
4660int ib_query_netdev_port(struct ib_device *ibdev, struct net_device *ndev,
4661 u32 *port);
4662
4663static inline enum ib_port_state ib_get_curr_port_state(struct net_device *net_dev)
4664{
4665 return (netif_running(net_dev) && netif_carrier_ok(net_dev)) ?
4666 IB_PORT_ACTIVE : IB_PORT_DOWN;
4667}
4668
4669void ib_dispatch_port_state_event(struct ib_device *ibdev,
4670 struct net_device *ndev);
4671struct ib_wq *ib_create_wq(struct ib_pd *pd,
4672 struct ib_wq_init_attr *init_attr);
4673int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
4674
4675int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4676 unsigned int *sg_offset, unsigned int page_size);
4677int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
4678 int data_sg_nents, unsigned int *data_sg_offset,
4679 struct scatterlist *meta_sg, int meta_sg_nents,
4680 unsigned int *meta_sg_offset, unsigned int page_size);
4681
4682static inline int
4683ib_map_mr_sg_zbva(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
4684 unsigned int *sg_offset, unsigned int page_size)
4685{
4686 int n;
4687
4688 n = ib_map_mr_sg(mr, sg, sg_nents, sg_offset, page_size);
4689 mr->iova = 0;
4690
4691 return n;
4692}
4693
4694int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
4695 unsigned int *sg_offset, int (*set_page)(struct ib_mr *, u64));
4696
4697void ib_drain_rq(struct ib_qp *qp);
4698void ib_drain_sq(struct ib_qp *qp);
4699void ib_drain_qp(struct ib_qp *qp);
4700
4701int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed,
4702 u8 *width);
4703
4704static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
4705{
4706 if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
4707 return attr->roce.dmac;
4708 return NULL;
4709}
4710
4711static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
4712{
4713 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4714 attr->ib.dlid = (u16)dlid;
4715 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4716 attr->opa.dlid = dlid;
4717}
4718
4719static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
4720{
4721 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4722 return attr->ib.dlid;
4723 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4724 return attr->opa.dlid;
4725 return 0;
4726}
4727
4728static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
4729{
4730 attr->sl = sl;
4731}
4732
4733static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
4734{
4735 return attr->sl;
4736}
4737
4738static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
4739 u8 src_path_bits)
4740{
4741 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4742 attr->ib.src_path_bits = src_path_bits;
4743 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4744 attr->opa.src_path_bits = src_path_bits;
4745}
4746
4747static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
4748{
4749 if (attr->type == RDMA_AH_ATTR_TYPE_IB)
4750 return attr->ib.src_path_bits;
4751 else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4752 return attr->opa.src_path_bits;
4753 return 0;
4754}
4755
4756static inline void rdma_ah_set_make_grd(struct rdma_ah_attr *attr,
4757 bool make_grd)
4758{
4759 if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4760 attr->opa.make_grd = make_grd;
4761}
4762
4763static inline bool rdma_ah_get_make_grd(const struct rdma_ah_attr *attr)
4764{
4765 if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
4766 return attr->opa.make_grd;
4767 return false;
4768}
4769
4770static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u32 port_num)
4771{
4772 attr->port_num = port_num;
4773}
4774
4775static inline u32 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
4776{
4777 return attr->port_num;
4778}
4779
4780static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
4781 u8 static_rate)
4782{
4783 attr->static_rate = static_rate;
4784}
4785
4786static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
4787{
4788 return attr->static_rate;
4789}
4790
4791static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
4792 enum ib_ah_flags flag)
4793{
4794 attr->ah_flags = flag;
4795}
4796
4797static inline enum ib_ah_flags
4798 rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
4799{
4800 return attr->ah_flags;
4801}
4802
4803static inline const struct ib_global_route
4804 *rdma_ah_read_grh(const struct rdma_ah_attr *attr)
4805{
4806 return &attr->grh;
4807}
4808
4809/*To retrieve and modify the grh */
4810static inline struct ib_global_route
4811 *rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
4812{
4813 return &attr->grh;
4814}
4815
4816static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
4817{
4818 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4819
4820 memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
4821}
4822
4823static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
4824 __be64 prefix)
4825{
4826 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4827
4828 grh->dgid.global.subnet_prefix = prefix;
4829}
4830
4831static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
4832 __be64 if_id)
4833{
4834 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4835
4836 grh->dgid.global.interface_id = if_id;
4837}
4838
4839static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
4840 union ib_gid *dgid, u32 flow_label,
4841 u8 sgid_index, u8 hop_limit,
4842 u8 traffic_class)
4843{
4844 struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
4845
4846 attr->ah_flags = IB_AH_GRH;
4847 if (dgid)
4848 grh->dgid = *dgid;
4849 grh->flow_label = flow_label;
4850 grh->sgid_index = sgid_index;
4851 grh->hop_limit = hop_limit;
4852 grh->traffic_class = traffic_class;
4853 grh->sgid_attr = NULL;
4854}
4855
4856void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);
4857void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
4858 u32 flow_label, u8 hop_limit, u8 traffic_class,
4859 const struct ib_gid_attr *sgid_attr);
4860void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
4861 const struct rdma_ah_attr *src);
4862void rdma_replace_ah_attr(struct rdma_ah_attr *old,
4863 const struct rdma_ah_attr *new);
4864void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);
4865
4866/**
4867 * rdma_ah_find_type - Return address handle type.
4868 *
4869 * @dev: Device to be checked
4870 * @port_num: Port number
4871 */
4872static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
4873 u32 port_num)
4874{
4875 if (rdma_protocol_roce(dev, port_num))
4876 return RDMA_AH_ATTR_TYPE_ROCE;
4877 if (rdma_protocol_ib(dev, port_num)) {
4878 if (rdma_cap_opa_ah(dev, port_num))
4879 return RDMA_AH_ATTR_TYPE_OPA;
4880 return RDMA_AH_ATTR_TYPE_IB;
4881 }
4882 if (dev->type == RDMA_DEVICE_TYPE_SMI)
4883 return RDMA_AH_ATTR_TYPE_IB;
4884
4885 return RDMA_AH_ATTR_TYPE_UNDEFINED;
4886}
4887
4888/**
4889 * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
4890 * In the current implementation the only way to
4891 * get the 32bit lid is from other sources for OPA.
4892 * For IB, lids will always be 16bits so cast the
4893 * value accordingly.
4894 *
4895 * @lid: A 32bit LID
4896 */
4897static inline u16 ib_lid_cpu16(u32 lid)
4898{
4899 WARN_ON_ONCE(lid & 0xFFFF0000);
4900 return (u16)lid;
4901}
4902
4903/**
4904 * ib_lid_be16 - Return lid in 16bit BE encoding.
4905 *
4906 * @lid: A 32bit LID
4907 */
4908static inline __be16 ib_lid_be16(u32 lid)
4909{
4910 WARN_ON_ONCE(lid & 0xFFFF0000);
4911 return cpu_to_be16((u16)lid);
4912}
4913
4914/**
4915 * rdma_roce_rescan_device - Rescan all of the network devices in the system
4916 * and add their gids, as needed, to the relevant RoCE devices.
4917 *
4918 * @ibdev: the rdma device
4919 */
4920void rdma_roce_rescan_device(struct ib_device *ibdev);
4921void rdma_roce_rescan_port(struct ib_device *ib_dev, u32 port);
4922void roce_del_all_netdev_gids(struct ib_device *ib_dev,
4923 u32 port, struct net_device *ndev);
4924
4925struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
4926
4927#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
4928int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
4929bool rdma_uattrs_has_raw_cap(const struct uverbs_attr_bundle *attrs);
4930#else
4931static inline int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs)
4932{
4933 return 0;
4934}
4935static inline bool
4936rdma_uattrs_has_raw_cap(const struct uverbs_attr_bundle *attrs)
4937{
4938 return false;
4939}
4940#endif
4941
4942struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
4943 enum rdma_netdev_t type, const char *name,
4944 unsigned char name_assign_type,
4945 void (*setup)(struct net_device *));
4946
4947int rdma_init_netdev(struct ib_device *device, u32 port_num,
4948 enum rdma_netdev_t type, const char *name,
4949 unsigned char name_assign_type,
4950 void (*setup)(struct net_device *),
4951 struct net_device *netdev);
4952
4953/**
4954 * rdma_device_to_ibdev - Get ib_device pointer from device pointer
4955 *
4956 * @device: device pointer for which ib_device pointer to retrieve
4957 *
4958 * rdma_device_to_ibdev() retrieves ib_device pointer from device.
4959 *
4960 */
4961static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
4962{
4963 struct ib_core_device *coredev =
4964 container_of(device, struct ib_core_device, dev);
4965
4966 return coredev->owner;
4967}
4968
4969/**
4970 * ibdev_to_node - return the NUMA node for a given ib_device
4971 * @ibdev: device to get the NUMA node for.
4972 */
4973static inline int ibdev_to_node(struct ib_device *ibdev)
4974{
4975 struct device *parent = ibdev->dev.parent;
4976
4977 if (!parent)
4978 return NUMA_NO_NODE;
4979 return dev_to_node(parent);
4980}
4981
4982/**
4983 * rdma_device_to_drv_device - Helper macro to reach back to driver's
4984 * ib_device holder structure from device pointer.
4985 *
4986 * NOTE: New drivers should not make use of this API; This API is only for
4987 * existing drivers who have exposed sysfs entries using
4988 * ops->device_group.
4989 */
4990#define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member) \
4991 container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)
4992
4993bool rdma_dev_access_netns(const struct ib_device *device,
4994 const struct net *net);
4995
4996bool rdma_dev_has_raw_cap(const struct ib_device *dev);
4997static inline struct net *rdma_dev_net(struct ib_device *device)
4998{
4999 return read_pnet(&device->coredev.rdma_net);
5000}
5001
5002#define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
5003#define IB_ROCE_UDP_ENCAP_VALID_PORT_MAX (0xFFFF)
5004#define IB_GRH_FLOWLABEL_MASK (0x000FFFFF)
5005
5006/**
5007 * rdma_flow_label_to_udp_sport - generate a RoCE v2 UDP src port value based
5008 * on the flow_label
5009 * @fl: flow_label value
5010 *
5011 * This function will convert the 20 bit flow_label input to a valid 14 bit
5012 * RoCE v2 UDP src port value. All RoCE v2 drivers should use this same
5013 * convention.
5014 */
5015static inline u16 rdma_flow_label_to_udp_sport(u32 fl)
5016{
5017 u32 fl_low = fl & 0x03fff, fl_high = fl & 0xFC000;
5018
5019 fl_low ^= fl_high >> 14;
5020 return (u16)(fl_low | IB_ROCE_UDP_ENCAP_VALID_PORT_MIN);
5021}
5022
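/*
 * Worked example (illustrative): for fl = 0x12345, fl_low = 0x2345 and
 * fl_high = 0x10000; folding the high bits in gives 0x2345 ^ 0x4 = 0x2341,
 * so the resulting UDP source port is 0x2341 | 0xC000 = 0xE341.
 */
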
5023/**
5024 * rdma_calc_flow_label - generate a RDMA symmetric flow label value based on
5025 * local and remote qpn values
5026 *
5027 * This function folds the product of the two 24 bit QPN fields and
5028 * converts it to a 20 bit result.
5029 *
5030 * This function will create a symmetric flow_label value based on the local
5031 * and remote qpn values. This allows both the requester and responder
5032 * to calculate the same flow_label for a given connection.
5033 *
5034 * This helper function should be used by drivers in case the upper layer
5035 * provides a zero flow_label value. This is to improve entropy of RDMA
5036 * traffic in the network.
5037 */
5038static inline u32 rdma_calc_flow_label(u32 lqpn, u32 rqpn)
5039{
5040 u64 v = (u64)lqpn * rqpn;
5041
5042 v ^= v >> 20;
5043 v ^= v >> 40;
5044
5045 return (u32)(v & IB_GRH_FLOWLABEL_MASK);
5046}
5047
5048/**
5049 * rdma_get_udp_sport - Calculate the UDP source port based on the flow
5050 * label. If flow label is not defined in GRH then
5051 * calculate it based on lqpn/rqpn.
5052 *
5053 * @fl: flow label from GRH
5054 * @lqpn: local qp number
5055 * @rqpn: remote qp number
5056 */
5057static inline u16 rdma_get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
5058{
5059 if (!fl)
5060 fl = rdma_calc_flow_label(lqpn, rqpn);
5061
5062 return rdma_flow_label_to_udp_sport(fl);
5063}
5064
5065const struct ib_port_immutable*
5066ib_port_immutable_read(struct ib_device *dev, unsigned int port);
5067
5068/** ib_add_sub_device - Add a sub IB device to an existing one
5069 *
5070 * @parent: The IB device that needs to add a sub device
5071 * @type: The type of the new sub device
5072 * @name: The name of the new sub device
5073 *
5075 * Return 0 on success, an error code otherwise
5076 */
5077int ib_add_sub_device(struct ib_device *parent,
5078 enum rdma_nl_dev_type type,
5079 const char *name);
5080
5081
5082/** ib_del_sub_device_and_put - Delete an IB sub device while holding a 'get'
5083 *
5084 * @sub: The sub device that is going to be deleted
5085 *
5086 * Return 0 on success, an error code otherwise
5087 */
5088int ib_del_sub_device_and_put(struct ib_device *sub);
5089
5090static inline void ib_mark_name_assigned_by_user(struct ib_device *ibdev)
5091{
5092 ibdev->name_assign_type = RDMA_NAME_ASSIGN_TYPE_USER;
5093}
5094
5095#endif /* IB_VERBS_H */