Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

lib/scatterlist: Provide a dedicated function to support table append

RDMA is the only in-kernel user that uses __sg_alloc_table_from_pages to
append pages dynamically. In the next patch, that mode will be extended
and that function will get more parameters, so separate it into a
dedicated function to make the change clearer.

Link: https://lore.kernel.org/r/20210824142531.3877007-2-maorg@nvidia.com
Signed-off-by: Maor Gottlieb <maorg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>

authored by

Maor Gottlieb and committed by
Jason Gunthorpe
90e7a6de 7c60610d

+90 -52
+6 -7
drivers/gpu/drm/drm_prime.c
··· 807 807 struct page **pages, unsigned int nr_pages) 808 808 { 809 809 struct sg_table *sg; 810 - struct scatterlist *sge; 811 810 size_t max_segment = 0; 811 + int err; 812 812 813 813 sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL); 814 814 if (!sg) ··· 818 818 max_segment = dma_max_mapping_size(dev->dev); 819 819 if (max_segment == 0) 820 820 max_segment = UINT_MAX; 821 - sge = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0, 822 - nr_pages << PAGE_SHIFT, 823 - max_segment, 824 - NULL, 0, GFP_KERNEL); 825 - if (IS_ERR(sge)) { 821 + err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0, 822 + nr_pages << PAGE_SHIFT, 823 + max_segment, GFP_KERNEL); 824 + if (err) { 826 825 kfree(sg); 827 - sg = ERR_CAST(sge); 826 + sg = ERR_PTR(err); 828 827 } 829 828 return sg; 830 829 }
+4 -7
drivers/gpu/drm/i915/gem/i915_gem_userptr.c
··· 133 133 unsigned int max_segment = i915_sg_segment_size(); 134 134 struct sg_table *st; 135 135 unsigned int sg_page_sizes; 136 - struct scatterlist *sg; 137 136 struct page **pvec; 138 137 int ret; 139 138 ··· 152 153 spin_unlock(&i915->mm.notifier_lock); 153 154 154 155 alloc_table: 155 - sg = __sg_alloc_table_from_pages(st, pvec, num_pages, 0, 156 - num_pages << PAGE_SHIFT, max_segment, 157 - NULL, 0, GFP_KERNEL); 158 - if (IS_ERR(sg)) { 159 - ret = PTR_ERR(sg); 156 + ret = sg_alloc_table_from_pages_segment(st, pvec, num_pages, 0, 157 + num_pages << PAGE_SHIFT, 158 + max_segment, GFP_KERNEL); 159 + if (ret) 160 160 goto err; 161 - } 162 161 163 162 ret = i915_gem_gtt_prepare_pages(obj, st); 164 163 if (ret) {
+5 -9
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
··· 363 363 int ret = 0; 364 364 static size_t sgl_size; 365 365 static size_t sgt_size; 366 - struct scatterlist *sg; 367 366 368 367 if (vmw_tt->mapped) 369 368 return 0; ··· 385 386 if (unlikely(ret != 0)) 386 387 return ret; 387 388 388 - sg = __sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages, 389 - vsgt->num_pages, 0, 390 - (unsigned long) vsgt->num_pages << PAGE_SHIFT, 391 - dma_get_max_seg_size(dev_priv->drm.dev), 392 - NULL, 0, GFP_KERNEL); 393 - if (IS_ERR(sg)) { 394 - ret = PTR_ERR(sg); 389 + ret = sg_alloc_table_from_pages_segment( 390 + &vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0, 391 + (unsigned long)vsgt->num_pages << PAGE_SHIFT, 392 + dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL); 393 + if (ret) 395 394 goto out_sg_alloc_fail; 396 - } 397 395 398 396 if (vsgt->num_pages > vmw_tt->sgt.orig_nents) { 399 397 uint64_t over_alloc =
+2 -2
drivers/infiniband/core/umem.c
··· 226 226 227 227 cur_base += ret * PAGE_SIZE; 228 228 npages -= ret; 229 - sg = __sg_alloc_table_from_pages(&umem->sg_head, page_list, ret, 230 - 0, ret << PAGE_SHIFT, 229 + sg = sg_alloc_append_table_from_pages(&umem->sg_head, page_list, 230 + ret, 0, ret << PAGE_SHIFT, 231 231 ib_dma_max_seg_size(device), sg, npages, 232 232 GFP_KERNEL); 233 233 umem->sg_nents = umem->sg_head.nents;
+35 -4
include/linux/scatterlist.h
··· 285 285 int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, 286 286 struct scatterlist *, unsigned int, gfp_t, sg_alloc_fn *); 287 287 int sg_alloc_table(struct sg_table *, unsigned int, gfp_t); 288 - struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt, 288 + struct scatterlist *sg_alloc_append_table_from_pages(struct sg_table *sgt, 289 289 struct page **pages, unsigned int n_pages, unsigned int offset, 290 290 unsigned long size, unsigned int max_segment, 291 291 struct scatterlist *prv, unsigned int left_pages, 292 292 gfp_t gfp_mask); 293 - int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, 294 - unsigned int n_pages, unsigned int offset, 295 - unsigned long size, gfp_t gfp_mask); 293 + int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages, 294 + unsigned int n_pages, unsigned int offset, 295 + unsigned long size, 296 + unsigned int max_segment, gfp_t gfp_mask); 297 + 298 + /** 299 + * sg_alloc_table_from_pages - Allocate and initialize an sg table from 300 + * an array of pages 301 + * @sgt: The sg table header to use 302 + * @pages: Pointer to an array of page pointers 303 + * @n_pages: Number of pages in the pages array 304 + * @offset: Offset from start of the first page to the start of a buffer 305 + * @size: Number of valid bytes in the buffer (after offset) 306 + * @gfp_mask: GFP allocation mask 307 + * 308 + * Description: 309 + * Allocate and initialize an sg table from a list of pages. Contiguous 310 + * ranges of the pages are squashed into a single scatterlist node. A user 311 + * may provide an offset at a start and a size of valid data in a buffer 312 + * specified by the page array. The returned sg table is released by 313 + * sg_free_table. 
314 + * 315 + * Returns: 316 + * 0 on success, negative error on failure 317 + */ 318 + static inline int sg_alloc_table_from_pages(struct sg_table *sgt, 319 + struct page **pages, 320 + unsigned int n_pages, 321 + unsigned int offset, 322 + unsigned long size, gfp_t gfp_mask) 323 + { 324 + return sg_alloc_table_from_pages_segment(sgt, pages, n_pages, offset, 325 + size, UINT_MAX, gfp_mask); 326 + } 296 327 297 328 #ifdef CONFIG_SGL_ALLOC 298 329 struct scatterlist *sgl_alloc_order(unsigned long long length,
+20 -16
lib/scatterlist.c
··· 397 397 } 398 398 399 399 /** 400 - * __sg_alloc_table_from_pages - Allocate and initialize an sg table from 400 + * sg_alloc_append_table_from_pages - Allocate and initialize an sg table from 401 401 * an array of pages 402 402 * @sgt: The sg table header to use 403 403 * @pages: Pointer to an array of page pointers ··· 425 425 * If this function returns non-0 (eg failure), the caller must call 426 426 * sg_free_table() to cleanup any leftover allocations. 427 427 */ 428 - struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt, 428 + struct scatterlist *sg_alloc_append_table_from_pages(struct sg_table *sgt, 429 429 struct page **pages, unsigned int n_pages, unsigned int offset, 430 430 unsigned long size, unsigned int max_segment, 431 431 struct scatterlist *prv, unsigned int left_pages, ··· 520 520 sg_mark_end(s); 521 521 return s; 522 522 } 523 - EXPORT_SYMBOL(__sg_alloc_table_from_pages); 523 + EXPORT_SYMBOL(sg_alloc_append_table_from_pages); 524 524 525 525 /** 526 - * sg_alloc_table_from_pages - Allocate and initialize an sg table from 527 - * an array of pages 526 + * sg_alloc_table_from_pages_segment - Allocate and initialize an sg table from 527 + * an array of pages and given maximum 528 + * segment. 528 529 * @sgt: The sg table header to use 529 530 * @pages: Pointer to an array of page pointers 530 531 * @n_pages: Number of pages in the pages array 531 532 * @offset: Offset from start of the first page to the start of a buffer 532 533 * @size: Number of valid bytes in the buffer (after offset) 534 + * @max_segment: Maximum size of a scatterlist element in bytes 533 535 * @gfp_mask: GFP allocation mask 534 536 * 535 537 * Description: 536 538 * Allocate and initialize an sg table from a list of pages. Contiguous 537 - * ranges of the pages are squashed into a single scatterlist node. A user 538 - * may provide an offset at a start and a size of valid data in a buffer 539 - * specified by the page array. 
The returned sg table is released by 540 - * sg_free_table. 539 + * ranges of the pages are squashed into a single scatterlist node up to the 540 + * maximum size specified in @max_segment. A user may provide an offset at a 541 + * start and a size of valid data in a buffer specified by the page array. 541 542 * 542 - * Returns: 543 + * The returned sg table is released by sg_free_table. 544 + * 545 + * Returns: 543 546 * 0 on success, negative error on failure 544 547 */ 545 - int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, 546 - unsigned int n_pages, unsigned int offset, 547 - unsigned long size, gfp_t gfp_mask) 548 + int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages, 549 + unsigned int n_pages, unsigned int offset, 550 + unsigned long size, unsigned int max_segment, 551 + gfp_t gfp_mask) 548 552 { 549 - return PTR_ERR_OR_ZERO(__sg_alloc_table_from_pages(sgt, pages, n_pages, 550 - offset, size, UINT_MAX, NULL, 0, gfp_mask)); 553 + return PTR_ERR_OR_ZERO(sg_alloc_append_table_from_pages(sgt, pages, 554 + n_pages, offset, size, max_segment, NULL, 0, gfp_mask)); 551 555 } 552 - EXPORT_SYMBOL(sg_alloc_table_from_pages); 556 + EXPORT_SYMBOL(sg_alloc_table_from_pages_segment); 553 557 554 558 #ifdef CONFIG_SGL_ALLOC 555 559
+18 -7
tools/testing/scatterlist/main.c
··· 87 87 int left_pages = test->pfn_app ? test->num_pages : 0; 88 88 struct page *pages[MAX_PAGES]; 89 89 struct sg_table st; 90 - struct scatterlist *sg; 90 + struct scatterlist *sg = NULL; 91 + int ret; 91 92 92 93 set_pages(pages, test->pfn, test->num_pages); 93 94 94 - sg = __sg_alloc_table_from_pages(&st, pages, test->num_pages, 0, 95 - test->size, test->max_seg, NULL, left_pages, GFP_KERNEL); 96 - assert(PTR_ERR_OR_ZERO(sg) == test->alloc_ret); 95 + if (test->pfn_app) { 96 + sg = sg_alloc_append_table_from_pages( 97 + &st, pages, test->num_pages, 0, test->size, 98 + test->max_seg, NULL, left_pages, GFP_KERNEL); 99 + assert(PTR_ERR_OR_ZERO(sg) == test->alloc_ret); 100 + } else { 101 + ret = sg_alloc_table_from_pages_segment( 102 + &st, pages, test->num_pages, 0, test->size, 103 + test->max_seg, GFP_KERNEL); 104 + assert(ret == test->alloc_ret); 105 + } 97 106 98 107 if (test->alloc_ret) 99 108 continue; 100 109 101 110 if (test->pfn_app) { 102 111 set_pages(pages, test->pfn_app, test->num_pages); 103 - sg = __sg_alloc_table_from_pages(&st, pages, test->num_pages, 0, 104 - test->size, test->max_seg, sg, 0, GFP_KERNEL); 112 + sg = sg_alloc_append_table_from_pages( 113 + &st, pages, test->num_pages, 0, test->size, 114 + test->max_seg, sg, 0, GFP_KERNEL); 105 115 106 116 assert(PTR_ERR_OR_ZERO(sg) == test->alloc_ret); 107 117 } 108 118 109 119 VALIDATE(st.nents == test->expected_segments, &st, test); 110 120 if (!test->pfn_app) 111 - VALIDATE(st.orig_nents == test->expected_segments, &st, test); 121 + VALIDATE(st.orig_nents == test->expected_segments, &st, 122 + test); 112 123 113 124 sg_free_table(&st); 114 125 }