Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Doug Ledford:
"A small collection of -rc fixes. Mostly. One API addition, but that's
because we wanted to use it in a fix. There's also a bug fix that is
going to render the 5.5 kernel's soft-RoCE driver incompatible with
all soft-RoCE versions prior, but it's required to actually implement
the protocol according to the RoCE spec and required in order for the
soft-RoCE driver to be able to successfully work with actual RoCE
hardware.

Summary:

- Update Steve Wise info

- Fix for soft-RoCE CRC calculations (breaks wire compatibility, but
only with earlier versions of the soft-RoCE driver, which has carried
this on-the-wire bug since it was introduced; the fix makes soft-RoCE
fully compatible with real RoCE hardware)

- cma init fixup

- counters oops fix

- fix for mlx4 init/teardown sequence

- fix for mlx5 steering rules

- introduce a cleanup API, which isn't a fix, but we want to use it
in the next fix

- fix for mlx5 memory management that uses API in previous patch"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
IB/mlx5: Fix device memory flows
IB/core: Introduce rdma_user_mmap_entry_insert_range() API
IB/mlx5: Fix steering rule of drop and count
IB/mlx4: Follow mirror sequence of device add during device removal
RDMA/counter: Prevent auto-binding a QP which are not tracked with res
rxe: correctly calculate iCRC for unaligned payloads
Update mailmap info for Steve Wise
RDMA/cma: add missed unregister_pernet_subsys in init failure

13 files changed: +183 -76
.mailmap (+2)
@@ -276,3 +276,5 @@
 Gustavo Padovan <padovan@profusion.mobi>
 Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
 Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
+Steve Wise <larrystevenwise@gmail.com> <swise@chelsio.com>
+Steve Wise <larrystevenwise@gmail.com> <swise@opengridcomputing.com>
drivers/infiniband/core/cma.c (+1)
@@ -4763,6 +4763,7 @@
 err:
     unregister_netdevice_notifier(&cma_nb);
     ib_sa_unregister_client(&sa_client);
+    unregister_pernet_subsys(&cma_pernet_operations);
 err_wq:
     destroy_workqueue(cma_wq);
     return ret;
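The fix restores the kernel's usual goto-unwind discipline for init failure paths: each resource acquired before the failure point is released by the matching error label, in reverse order of acquisition. A minimal sketch of the pattern, assuming a hypothetical module (example_pernet_ops, example_nb and example_wq are illustrative names, not the cma code):

```c
#include <linux/netdevice.h>
#include <linux/workqueue.h>
#include <net/net_namespace.h>

static struct workqueue_struct *example_wq;
static struct pernet_operations example_pernet_ops;
static struct notifier_block example_nb;

static int __init example_init(void)
{
        int ret;

        ret = register_pernet_subsys(&example_pernet_ops);
        if (ret)
                return ret;

        example_wq = alloc_workqueue("example", 0, 0);
        if (!example_wq) {
                ret = -ENOMEM;
                goto err_pernet;
        }

        ret = register_netdevice_notifier(&example_nb);
        if (ret)
                goto err_wq;

        return 0;

err_wq:
        destroy_workqueue(example_wq);
err_pernet:
        /* the kind of release the cma fix adds to its error path */
        unregister_pernet_subsys(&example_pernet_ops);
        return ret;
}
```

The cma bug was exactly a missing rung in this ladder: the pernet subsystem was registered early in init but never unregistered when a later step failed.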
drivers/infiniband/core/counters.c (+3)
@@ -286,6 +286,9 @@
     struct rdma_counter *counter;
     int ret;
 
+    if (!qp->res.valid)
+        return 0;
+
     if (!rdma_is_port_valid(dev, port))
         return -EINVAL;
 
drivers/infiniband/core/ib_core_uverbs.c (+39 -9)
@@ -238,28 +238,32 @@
 EXPORT_SYMBOL(rdma_user_mmap_entry_remove);
 
 /**
- * rdma_user_mmap_entry_insert() - Insert an entry to the mmap_xa
+ * rdma_user_mmap_entry_insert_range() - Insert an entry to the mmap_xa
+ * in a given range.
  *
  * @ucontext: associated user context.
  * @entry: the entry to insert into the mmap_xa
  * @length: length of the address that will be mmapped
+ * @min_pgoff: minimum pgoff to be returned
+ * @max_pgoff: maximum pgoff to be returned
  *
  * This function should be called by drivers that use the rdma_user_mmap
  * interface for implementing their mmap syscall A database of mmap offsets is
  * handled in the core and helper functions are provided to insert entries
  * into the database and extract entries when the user calls mmap with the
- * given offset. The function allocates a unique page offset that should be
- * provided to user, the user will use the offset to retrieve information such
- * as address to be mapped and how.
+ * given offset. The function allocates a unique page offset in a given range
+ * that should be provided to user, the user will use the offset to retrieve
+ * information such as address to be mapped and how.
  *
  * Return: 0 on success and -ENOMEM on failure
  */
-int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
-                struct rdma_user_mmap_entry *entry,
-                size_t length)
+int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
+                      struct rdma_user_mmap_entry *entry,
+                      size_t length, u32 min_pgoff,
+                      u32 max_pgoff)
 {
     struct ib_uverbs_file *ufile = ucontext->ufile;
-    XA_STATE(xas, &ucontext->mmap_xa, 0);
+    XA_STATE(xas, &ucontext->mmap_xa, min_pgoff);
     u32 xa_first, xa_last, npages;
     int err;
     u32 i;
@@ -289,7 +285,7 @@
     entry->npages = npages;
     while (true) {
         /* First find an empty index */
-        xas_find_marked(&xas, U32_MAX, XA_FREE_MARK);
+        xas_find_marked(&xas, max_pgoff, XA_FREE_MARK);
         if (xas.xa_node == XAS_RESTART)
             goto err_unlock;
 
@@ -335,5 +331,31 @@
     xa_unlock(&ucontext->mmap_xa);
     mutex_unlock(&ufile->umap_lock);
     return -ENOMEM;
+}
+EXPORT_SYMBOL(rdma_user_mmap_entry_insert_range);
+
+/**
+ * rdma_user_mmap_entry_insert() - Insert an entry to the mmap_xa.
+ *
+ * @ucontext: associated user context.
+ * @entry: the entry to insert into the mmap_xa
+ * @length: length of the address that will be mmapped
+ *
+ * This function should be called by drivers that use the rdma_user_mmap
+ * interface for handling user mmapped addresses. The database is handled in
+ * the core and helper functions are provided to insert entries into the
+ * database and extract entries when the user calls mmap with the given offset.
+ * The function allocates a unique page offset that should be provided to user,
+ * the user will use the offset to retrieve information such as address to
+ * be mapped and how.
+ *
+ * Return: 0 on success and -ENOMEM on failure
+ */
+int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
+                struct rdma_user_mmap_entry *entry,
+                size_t length)
+{
+    return rdma_user_mmap_entry_insert_range(ucontext, entry, length, 0,
+                         U32_MAX);
 }
 EXPORT_SYMBOL(rdma_user_mmap_entry_insert);
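For a driver, the new call is a drop-in variant of rdma_user_mmap_entry_insert() that confines the allocated pgoff to a caller-chosen window, so different mmap commands can own disjoint offset ranges. A hedged sketch of that usage, assuming a driver-private wrapper (my_mmap_entry and my_reserve_mmap_offset are hypothetical; the 16-bit-per-command split mirrors what mlx5 does later in this series):

```c
#include <rdma/ib_verbs.h>

/* Hypothetical driver entry embedding the core mmap entry. */
struct my_mmap_entry {
        struct rdma_user_mmap_entry rdma_entry;
        u64 address;
};

/*
 * Reserve a pgoff inside the 64K window owned by @command. The core
 * writes the chosen offset into entry->rdma_entry.start_pgoff, which
 * is what userspace later passes to mmap().
 */
static int my_reserve_mmap_offset(struct ib_ucontext *ucontext,
                                  struct my_mmap_entry *entry,
                                  size_t length, u32 command)
{
        u32 first = command << 16;
        u32 last = first + (1U << 16) - 1;

        return rdma_user_mmap_entry_insert_range(ucontext,
                                                 &entry->rdma_entry,
                                                 length, first, last);
}
```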
drivers/infiniband/hw/mlx4/main.c (+5 -4)
@@ -3018,15 +3018,16 @@
     ibdev->ib_active = false;
     flush_workqueue(wq);
 
-    mlx4_ib_close_sriov(ibdev);
-    mlx4_ib_mad_cleanup(ibdev);
-    ib_unregister_device(&ibdev->ib_dev);
-    mlx4_ib_diag_cleanup(ibdev);
     if (ibdev->iboe.nb.notifier_call) {
         if (unregister_netdevice_notifier(&ibdev->iboe.nb))
             pr_warn("failure unregistering notifier\n");
         ibdev->iboe.nb.notifier_call = NULL;
     }
+
+    mlx4_ib_close_sriov(ibdev);
+    mlx4_ib_mad_cleanup(ibdev);
+    ib_unregister_device(&ibdev->ib_dev);
+    mlx4_ib_diag_cleanup(ibdev);
 
     mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
                   ibdev->steer_qpn_count);
drivers/infiniband/hw/mlx5/cmd.c (+7 -9)
@@ -157,7 +157,7 @@
     return -ENOMEM;
 }
 
-int mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
+void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length)
 {
     struct mlx5_core_dev *dev = dm->dev;
     u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
@@ -175,15 +175,13 @@
     MLX5_SET(dealloc_memic_in, in, memic_size, length);
 
     err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+    if (err)
+        return;
 
-    if (!err) {
-        spin_lock(&dm->lock);
-        bitmap_clear(dm->memic_alloc_pages,
-                 start_page_idx, num_pages);
-        spin_unlock(&dm->lock);
-    }
-
-    return err;
+    spin_lock(&dm->lock);
+    bitmap_clear(dm->memic_alloc_pages,
+             start_page_idx, num_pages);
+    spin_unlock(&dm->lock);
 }
 
 int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out)
drivers/infiniband/hw/mlx5/cmd.h (+1 -1)
@@ -46,7 +46,7 @@
                void *in, int in_size);
 int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
              u64 length, u32 alignment);
-int mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length);
+void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr, u64 length);
 void mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid);
 void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid);
 void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid);
drivers/infiniband/hw/mlx5/main.c (+88 -51)
@@ -2074,6 +2074,24 @@
         virt_to_page(dev->mdev->clock_info));
 }
 
+static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
+{
+    struct mlx5_user_mmap_entry *mentry = to_mmmap(entry);
+    struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device);
+    struct mlx5_ib_dm *mdm;
+
+    switch (mentry->mmap_flag) {
+    case MLX5_IB_MMAP_TYPE_MEMIC:
+        mdm = container_of(mentry, struct mlx5_ib_dm, mentry);
+        mlx5_cmd_dealloc_memic(&dev->dm, mdm->dev_addr,
+                       mdm->size);
+        kfree(mdm);
+        break;
+    default:
+        WARN_ON(true);
+    }
+}
+
 static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
             struct vm_area_struct *vma,
             struct mlx5_ib_ucontext *context)
@@ -2204,26 +2186,55 @@
     return err;
 }
 
-static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+static int add_dm_mmap_entry(struct ib_ucontext *context,
+                 struct mlx5_ib_dm *mdm,
+                 u64 address)
 {
-    struct mlx5_ib_ucontext *mctx = to_mucontext(context);
-    struct mlx5_ib_dev *dev = to_mdev(context->device);
-    u16 page_idx = get_extended_index(vma->vm_pgoff);
-    size_t map_size = vma->vm_end - vma->vm_start;
-    u32 npages = map_size >> PAGE_SHIFT;
-    phys_addr_t pfn;
+    mdm->mentry.mmap_flag = MLX5_IB_MMAP_TYPE_MEMIC;
+    mdm->mentry.address = address;
+    return rdma_user_mmap_entry_insert_range(
+            context, &mdm->mentry.rdma_entry,
+            mdm->size,
+            MLX5_IB_MMAP_DEVICE_MEM << 16,
+            (MLX5_IB_MMAP_DEVICE_MEM << 16) + (1UL << 16) - 1);
+}
 
-    if (find_next_zero_bit(mctx->dm_pages, page_idx + npages, page_idx) !=
-        page_idx + npages)
+static unsigned long mlx5_vma_to_pgoff(struct vm_area_struct *vma)
+{
+    unsigned long idx;
+    u8 command;
+
+    command = get_command(vma->vm_pgoff);
+    idx = get_extended_index(vma->vm_pgoff);
+
+    return (command << 16 | idx);
+}
+
+static int mlx5_ib_mmap_offset(struct mlx5_ib_dev *dev,
+                   struct vm_area_struct *vma,
+                   struct ib_ucontext *ucontext)
+{
+    struct mlx5_user_mmap_entry *mentry;
+    struct rdma_user_mmap_entry *entry;
+    unsigned long pgoff;
+    pgprot_t prot;
+    phys_addr_t pfn;
+    int ret;
+
+    pgoff = mlx5_vma_to_pgoff(vma);
+    entry = rdma_user_mmap_entry_get_pgoff(ucontext, pgoff);
+    if (!entry)
         return -EINVAL;
 
-    pfn = ((dev->mdev->bar_addr +
-        MLX5_CAP64_DEV_MEM(dev->mdev, memic_bar_start_addr)) >>
-           PAGE_SHIFT) +
-          page_idx;
-    return rdma_user_mmap_io(context, vma, pfn, map_size,
-                 pgprot_writecombine(vma->vm_page_prot),
-                 NULL);
+    mentry = to_mmmap(entry);
+    pfn = (mentry->address >> PAGE_SHIFT);
+    prot = pgprot_writecombine(vma->vm_page_prot);
+    ret = rdma_user_mmap_io(ucontext, vma, pfn,
+                entry->npages * PAGE_SIZE,
+                prot,
+                entry);
+    rdma_user_mmap_entry_put(&mentry->rdma_entry);
+    return ret;
 }
 
 static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
@@ -2295,11 +2248,8 @@
     case MLX5_IB_MMAP_CLOCK_INFO:
         return mlx5_ib_mmap_clock_info_page(dev, vma, context);
 
-    case MLX5_IB_MMAP_DEVICE_MEM:
-        return dm_mmap(ibcontext, vma);
-
     default:
-        return -EINVAL;
+        return mlx5_ib_mmap_offset(dev, vma, ibcontext);
     }
 
     return 0;
@@ -2332,7 +2288,8 @@
 {
     struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
     u64 start_offset;
-    u32 page_idx;
+    u16 page_idx;
     int err;
+    u64 address;
 
     dm->size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
@@ -2343,28 +2298,30 @@
     if (err)
         return err;
 
-    page_idx = (dm->dev_addr - pci_resource_start(dm_db->dev->pdev, 0) -
-            MLX5_CAP64_DEV_MEM(dm_db->dev, memic_bar_start_addr)) >>
-           PAGE_SHIFT;
-
-    err = uverbs_copy_to(attrs,
-                 MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
-                 &page_idx, sizeof(page_idx));
+    address = dm->dev_addr & PAGE_MASK;
+    err = add_dm_mmap_entry(ctx, dm, address);
     if (err)
         goto err_dealloc;
+
+    page_idx = dm->mentry.rdma_entry.start_pgoff & 0xFFFF;
+    err = uverbs_copy_to(attrs,
+                 MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
+                 &page_idx,
+                 sizeof(page_idx));
+    if (err)
+        goto err_copy;
 
     start_offset = dm->dev_addr & ~PAGE_MASK;
     err = uverbs_copy_to(attrs,
                  MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
                  &start_offset, sizeof(start_offset));
     if (err)
-        goto err_dealloc;
-
-    bitmap_set(to_mucontext(ctx)->dm_pages, page_idx,
-           DIV_ROUND_UP(dm->size, PAGE_SIZE));
+        goto err_copy;
 
     return 0;
 
+err_copy:
+    rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
 err_dealloc:
     mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size);
 
@@ -2470,23 +2423,13 @@
     struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
         &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
     struct mlx5_core_dev *dev = to_mdev(ibdm->device)->mdev;
-    struct mlx5_dm *dm_db = &to_mdev(ibdm->device)->dm;
     struct mlx5_ib_dm *dm = to_mdm(ibdm);
-    u32 page_idx;
     int ret;
 
     switch (dm->type) {
     case MLX5_IB_UAPI_DM_TYPE_MEMIC:
-        ret = mlx5_cmd_dealloc_memic(dm_db, dm->dev_addr, dm->size);
-        if (ret)
-            return ret;
-
-        page_idx = (dm->dev_addr - pci_resource_start(dev->pdev, 0) -
-                MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr)) >>
-               PAGE_SHIFT;
-        bitmap_clear(ctx->dm_pages, page_idx,
-                 DIV_ROUND_UP(dm->size, PAGE_SIZE));
-        break;
+        rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
+        return 0;
     case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
         ret = mlx5_dm_sw_icm_dealloc(dev, MLX5_SW_ICM_TYPE_STEERING,
                          dm->size, ctx->devx_uid, dm->dev_addr,
@@ -3581,10 +3544,6 @@
     }
 
     INIT_LIST_HEAD(&handler->list);
-    if (dst) {
-        memcpy(&dest_arr[0], dst, sizeof(*dst));
-        dest_num++;
-    }
 
     for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
         err = parse_flow_attr(dev->mdev, spec,
@@ -3591,6 +3558,11 @@
 
         prev_type = ((union ib_flow_spec *)ib_flow)->type;
         ib_flow += ((union ib_flow_spec *)ib_flow)->size;
+    }
+
+    if (dst && !(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP)) {
+        memcpy(&dest_arr[0], dst, sizeof(*dst));
+        dest_num++;
     }
 
     if (!flow_is_multicast_only(flow_attr))
@@ -3638,10 +3600,8 @@
     }
 
     if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
-        if (!(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT)) {
+        if (!dest_num)
             rule_dst = NULL;
-            dest_num = 0;
-        }
     } else {
         if (is_egress)
             flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
@@ -6272,6 +6236,7 @@
     .map_mr_sg = mlx5_ib_map_mr_sg,
     .map_mr_sg_pi = mlx5_ib_map_mr_sg_pi,
     .mmap = mlx5_ib_mmap,
+    .mmap_free = mlx5_ib_mmap_free,
     .modify_cq = mlx5_ib_modify_cq,
    .modify_device = mlx5_ib_modify_device,
     .modify_port = mlx5_ib_modify_port,
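The conversion hinges on a stable round trip between a vma's pgoff and the xarray key: the mmap command selects a 64K window and the entry index fills the low 16 bits, which is why alloc_dm can return just `start_pgoff & 0xFFFF` to userspace. A standalone sketch of that encoding (the shift and mask here are illustrative constants, not the mlx5 get_command()/get_extended_index() macros):

```c
#include <assert.h>
#include <stdint.h>

#define MMAP_CMD_SHIFT  16
#define MMAP_IDX_MASK   0xFFFFu

/* Pack a command id and a per-command index into one page offset. */
static unsigned long encode_pgoff(uint8_t command, uint16_t idx)
{
        return ((unsigned long)command << MMAP_CMD_SHIFT) | idx;
}

static uint8_t pgoff_command(unsigned long pgoff)
{
        return pgoff >> MMAP_CMD_SHIFT;
}

static uint16_t pgoff_index(unsigned long pgoff)
{
        return pgoff & MMAP_IDX_MASK;
}

int main(void)
{
        /* 7 is an arbitrary command id for the example */
        unsigned long pgoff = encode_pgoff(7, 42);

        /* The round trip must be lossless or mmap lookups would miss. */
        assert(pgoff_command(pgoff) == 7);
        assert(pgoff_index(pgoff) == 42);
        return 0;
}
```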
drivers/infiniband/hw/mlx5/mlx5_ib.h (+18 -1)
@@ -118,6 +118,10 @@
     MLX5_MEMIC_BASE_SIZE = 1 << MLX5_MEMIC_BASE_ALIGN,
 };
 
+enum mlx5_ib_mmap_type {
+    MLX5_IB_MMAP_TYPE_MEMIC = 1,
+};
+
 #define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) \
     (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
 #define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
@@ -139,7 +135,6 @@
     u32 tdn;
 
     u64 lib_caps;
-    DECLARE_BITMAP(dm_pages, MLX5_MAX_MEMIC_PAGES);
     u16 devx_uid;
     /* For RoCE LAG TX affinity */
     atomic_t tx_port_affinity;
@@ -559,6 +556,12 @@
     MLX5_IB_MTT_WRITE = (1 << 1),
 };
 
+struct mlx5_user_mmap_entry {
+    struct rdma_user_mmap_entry rdma_entry;
+    u8 mmap_flag;
+    u64 address;
+};
+
 struct mlx5_ib_dm {
     struct ib_dm ibdm;
     phys_addr_t dev_addr;
@@ -576,6 +567,7 @@
         } icm_dm;
         /* other dm types specific params should be added here */
     };
+    struct mlx5_user_mmap_entry mentry;
 };
 
 #define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
@@ -1109,6 +1099,13 @@
 to_mflow_act(struct ib_flow_action *ibact)
 {
     return container_of(ibact, struct mlx5_ib_flow_action, ib_action);
+}
+
+static inline struct mlx5_user_mmap_entry *
+to_mmmap(struct rdma_user_mmap_entry *rdma_entry)
+{
+    return container_of(rdma_entry,
+                struct mlx5_user_mmap_entry, rdma_entry);
 }
 
 int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context,
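The to_mmmap() helper added here is the standard container_of() idiom: given a pointer to the embedded rdma_user_mmap_entry, recover the enclosing driver struct. A self-contained userspace illustration (struct inner/outer and to_outer() are invented for the example):

```c
#include <assert.h>
#include <stddef.h>

/* Userspace stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int x; };

struct outer {
        int tag;
        struct inner in; /* embedded, like rdma_entry above */
};

static struct outer *to_outer(struct inner *p)
{
        return container_of(p, struct outer, in);
}

int main(void)
{
        struct outer o = { .tag = 7 };

        /* Pointer arithmetic recovers the parent structure. */
        assert(to_outer(&o.in) == &o);
        return 0;
}
```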
drivers/infiniband/sw/rxe/rxe_recv.c (+1 -1)
@@ -389,7 +389,7 @@
 
     calc_icrc = rxe_icrc_hdr(pkt, skb);
     calc_icrc = rxe_crc32(rxe, calc_icrc, (u8 *)payload_addr(pkt),
-                  payload_size(pkt));
+                  payload_size(pkt) + bth_pad(pkt));
     calc_icrc = (__force u32)cpu_to_be32(~calc_icrc);
     if (unlikely(calc_icrc != pack_icrc)) {
         if (skb->protocol == htons(ETH_P_IPV6))
drivers/infiniband/sw/rxe/rxe_req.c (+6)
@@ -500,6 +500,12 @@
             if (err)
                 return err;
         }
+        if (bth_pad(pkt)) {
+            u8 *pad = payload_addr(pkt) + paylen;
+
+            memset(pad, 0, bth_pad(pkt));
+            crc = rxe_crc32(rxe, crc, pad, bth_pad(pkt));
+        }
     }
 
     p = payload_addr(pkt) + paylen + bth_pad(pkt);
drivers/infiniband/sw/rxe/rxe_resp.c (+7)
@@ -732,6 +732,13 @@
     if (err)
         pr_err("Failed copying memory\n");
 
+    if (bth_pad(&ack_pkt)) {
+        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+        u8 *pad = payload_addr(&ack_pkt) + payload;
+
+        memset(pad, 0, bth_pad(&ack_pkt));
+        icrc = rxe_crc32(rxe, icrc, pad, bth_pad(&ack_pkt));
+    }
     p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt);
     *p = ~icrc;
 
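All three rxe hunks enforce the same spec requirement: the iCRC covers the payload plus the pad bytes. The receive path simply extends the length it feeds to the CRC, while both transmit paths zero the pad before folding it in. A sketch of the transmit-side step they share (icrc_over_payload is a hypothetical helper for illustration, not a function the patch adds):

```c
/* Hedged kernel-style sketch; rxe_crc32() is the driver's CRC helper. */
static u32 icrc_over_payload(struct rxe_dev *rxe, u32 crc,
                             u8 *payload, size_t paylen, size_t padlen)
{
        crc = rxe_crc32(rxe, crc, payload, paylen);
        if (padlen) {
                u8 *pad = payload + paylen;

                /* Pad bytes are zeroed before hashing so both ends agree. */
                memset(pad, 0, padlen);
                crc = rxe_crc32(rxe, crc, pad, padlen);
        }
        return crc;
}
```

This is the on-the-wire change the merge message warns about: older soft-RoCE peers computed the iCRC without the pad bytes, so the two generations reject each other's packets, but the new behavior matches real RoCE hardware.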
include/rdma/ib_verbs.h (+5)
@@ -2832,6 +2832,11 @@
 int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
                 struct rdma_user_mmap_entry *entry,
                 size_t length);
+int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
+                      struct rdma_user_mmap_entry *entry,
+                      size_t length, u32 min_pgoff,
+                      u32 max_pgoff);
+
 struct rdma_user_mmap_entry *
 rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
                    unsigned long pgoff);