Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

gpu: Move DRM buddy allocator one level up (part two)

Move the DRM buddy allocator one level up so that it can be used by GPU
drivers (for example, nova-core) that have use cases other than DRM (such
as VFIO vGPU support). Rename the API, structures, and Kconfig options to
use "gpu_buddy" terminology, and adapt the drivers and tests to the new API.

The commit cannot be split without breaking bisectability; however, no
functional change is intended. Verified by running the KUnit tests and by
build-testing various configurations.

Signed-off-by: Joel Fernandes <joelagnelf@nvidia.com>
Reviewed-by: Dave Airlie <airlied@redhat.com>
[airlied: I've split this into two so git can find copies more easily.
I've also just nuked the drm_random library; that stuff needs to be done
elsewhere, and only the buddy tests seem to be using it.]
Signed-off-by: Dave Airlie <airlied@redhat.com>
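
For orientation, here is a minimal sketch of the renamed API as a non-DRM
driver might call it. This is illustrative only (error handling trimmed;
the region size, chunk size, and allocation size are made up, not taken
from the patch):

	#include <linux/gpu_buddy.h>
	#include <linux/list.h>
	#include <linux/sizes.h>

	/* Illustrative: manage a 256 MiB region in 4 KiB chunks. */
	static int example_gpu_buddy_use(void)
	{
		struct gpu_buddy mm;
		LIST_HEAD(blocks);
		int err;

		err = gpu_buddy_init(&mm, SZ_256M, SZ_4K);
		if (err)
			return err;

		/* Allocate 1 MiB anywhere in the managed range. */
		err = gpu_buddy_alloc_blocks(&mm, 0, mm.size, SZ_1M, SZ_4K,
					     &blocks, 0);
		if (!err)
			gpu_buddy_free_list(&mm, &blocks, 0);

		gpu_buddy_fini(&mm);
		return err;
	}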

Authored by Joel Fernandes; committed by Dave Airlie
ba110db8 4a9671a0

+853 -739
+6
Documentation/gpu/drm-mm.rst
··· 532 532 .. kernel-doc:: drivers/gpu/buddy.c 533 533 :export: 534 534 535 + DRM Buddy Specific Logging Function References 536 + ---------------------------------------------- 537 + 538 + .. kernel-doc:: drivers/gpu/drm/drm_buddy.c 539 + :export: 540 + 535 541 DRM Cache Handling and Fast WC memcpy() 536 542 ======================================= 537 543
+5 -3
MAINTAINERS
··· 8797 8797 F: drivers/gpu/drm/ttm/ 8798 8798 F: include/drm/ttm/ 8799 8799 8800 - DRM BUDDY ALLOCATOR 8800 + GPU BUDDY ALLOCATOR 8801 8801 M: Matthew Auld <matthew.auld@intel.com> 8802 8802 M: Arun Pravin <arunpravin.paneerselvam@amd.com> 8803 8803 R: Christian Koenig <christian.koenig@amd.com> 8804 8804 L: dri-devel@lists.freedesktop.org 8805 8805 S: Maintained 8806 8806 T: git https://gitlab.freedesktop.org/drm/misc/kernel.git 8807 - F: drivers/gpu/drm/drm_buddy.c 8808 - F: drivers/gpu/drm/tests/drm_buddy_test.c 8807 + F: drivers/gpu/drm/drm_buddy.c 8808 + F: drivers/gpu/buddy.c 8809 + F: drivers/gpu/tests/gpu_buddy_test.c 8810 + F: include/linux/gpu_buddy.h 8809 8811 F: include/drm/drm_buddy.h 8810 8812 8811 8813 DRM AUTOMATED TESTING
+13
drivers/gpu/Kconfig
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + 3 + config GPU_BUDDY 4 + bool 5 + help 6 + A page based buddy allocator for GPU memory. 7 + 8 + config GPU_BUDDY_KUNIT_TEST 9 + tristate "KUnit tests for GPU buddy allocator" if !KUNIT_ALL_TESTS 10 + depends on GPU_BUDDY && KUNIT 11 + default KUNIT_ALL_TESTS 12 + help 13 + KUnit tests for the GPU buddy allocator.
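
The new GPU_BUDDY_KUNIT_TEST option gates the relocated tests
(drivers/gpu/tests/gpu_buddy_test.c per the MAINTAINERS hunk above). As a
rough sketch of the shape such a KUnit case takes — the suite and case
names below are hypothetical, not from the patch:

	#include <kunit/test.h>
	#include <linux/gpu_buddy.h>
	#include <linux/sizes.h>

	/* Hypothetical case: init/fini round trip on a small manager. */
	static void gpu_buddy_example_test(struct kunit *test)
	{
		struct gpu_buddy mm;

		KUNIT_ASSERT_EQ(test, gpu_buddy_init(&mm, SZ_16M, SZ_4K), 0);
		gpu_buddy_fini(&mm);
	}

	static struct kunit_case gpu_buddy_example_cases[] = {
		KUNIT_CASE(gpu_buddy_example_test),
		{}
	};

	static struct kunit_suite gpu_buddy_example_suite = {
		.name = "gpu_buddy_example",
		.test_cases = gpu_buddy_example_cases,
	};
	kunit_test_suites(&gpu_buddy_example_suite);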
+1
drivers/gpu/Makefile
··· 6 6 obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/ 7 7 obj-$(CONFIG_TRACE_GPU_MEM) += trace/ 8 8 obj-$(CONFIG_NOVA_CORE) += nova-core/ 9 + obj-$(CONFIG_GPU_BUDDY) += buddy.o
+271 -285
drivers/gpu/buddy.c
··· 11 11 #include <linux/sizes.h> 12 12 13 13 #include <linux/gpu_buddy.h> 14 - #include <drm/drm_print.h> 15 - 16 - enum drm_buddy_free_tree { 17 - DRM_BUDDY_CLEAR_TREE = 0, 18 - DRM_BUDDY_DIRTY_TREE, 19 - DRM_BUDDY_MAX_FREE_TREES, 20 - }; 21 14 22 15 static struct kmem_cache *slab_blocks; 23 16 24 - #define for_each_free_tree(tree) \ 25 - for ((tree) = 0; (tree) < DRM_BUDDY_MAX_FREE_TREES; (tree)++) 26 - 27 - static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm, 28 - struct drm_buddy_block *parent, 17 + static struct gpu_buddy_block *gpu_block_alloc(struct gpu_buddy *mm, 18 + struct gpu_buddy_block *parent, 29 19 unsigned int order, 30 20 u64 offset) 31 21 { 32 - struct drm_buddy_block *block; 22 + struct gpu_buddy_block *block; 33 23 34 - BUG_ON(order > DRM_BUDDY_MAX_ORDER); 24 + BUG_ON(order > GPU_BUDDY_MAX_ORDER); 35 25 36 26 block = kmem_cache_zalloc(slab_blocks, GFP_KERNEL); 37 27 if (!block) ··· 33 43 34 44 RB_CLEAR_NODE(&block->rb); 35 45 36 - BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED); 46 + BUG_ON(block->header & GPU_BUDDY_HEADER_UNUSED); 37 47 return block; 38 48 } 39 49 40 - static void drm_block_free(struct drm_buddy *mm, 41 - struct drm_buddy_block *block) 50 + static void gpu_block_free(struct gpu_buddy *mm, 51 + struct gpu_buddy_block *block) 42 52 { 43 53 kmem_cache_free(slab_blocks, block); 44 54 } 45 55 46 - static enum drm_buddy_free_tree 47 - get_block_tree(struct drm_buddy_block *block) 56 + static enum gpu_buddy_free_tree 57 + get_block_tree(struct gpu_buddy_block *block) 48 58 { 49 - return drm_buddy_block_is_clear(block) ? 50 - DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE; 59 + return gpu_buddy_block_is_clear(block) ? 60 + GPU_BUDDY_CLEAR_TREE : GPU_BUDDY_DIRTY_TREE; 51 61 } 52 62 53 - static struct drm_buddy_block * 63 + static struct gpu_buddy_block * 54 64 rbtree_get_free_block(const struct rb_node *node) 55 65 { 56 - return node ? rb_entry(node, struct drm_buddy_block, rb) : NULL; 66 + return node ? 
rb_entry(node, struct gpu_buddy_block, rb) : NULL; 57 67 } 58 68 59 - static struct drm_buddy_block * 69 + static struct gpu_buddy_block * 60 70 rbtree_last_free_block(struct rb_root *root) 61 71 { 62 72 return rbtree_get_free_block(rb_last(root)); ··· 67 77 return RB_EMPTY_ROOT(root); 68 78 } 69 79 70 - static bool drm_buddy_block_offset_less(const struct drm_buddy_block *block, 71 - const struct drm_buddy_block *node) 80 + static bool gpu_buddy_block_offset_less(const struct gpu_buddy_block *block, 81 + const struct gpu_buddy_block *node) 72 82 { 73 - return drm_buddy_block_offset(block) < drm_buddy_block_offset(node); 83 + return gpu_buddy_block_offset(block) < gpu_buddy_block_offset(node); 74 84 } 75 85 76 86 static bool rbtree_block_offset_less(struct rb_node *block, 77 87 const struct rb_node *node) 78 88 { 79 - return drm_buddy_block_offset_less(rbtree_get_free_block(block), 89 + return gpu_buddy_block_offset_less(rbtree_get_free_block(block), 80 90 rbtree_get_free_block(node)); 81 91 } 82 92 83 - static void rbtree_insert(struct drm_buddy *mm, 84 - struct drm_buddy_block *block, 85 - enum drm_buddy_free_tree tree) 93 + static void rbtree_insert(struct gpu_buddy *mm, 94 + struct gpu_buddy_block *block, 95 + enum gpu_buddy_free_tree tree) 86 96 { 87 97 rb_add(&block->rb, 88 - &mm->free_trees[tree][drm_buddy_block_order(block)], 98 + &mm->free_trees[tree][gpu_buddy_block_order(block)], 89 99 rbtree_block_offset_less); 90 100 } 91 101 92 - static void rbtree_remove(struct drm_buddy *mm, 93 - struct drm_buddy_block *block) 102 + static void rbtree_remove(struct gpu_buddy *mm, 103 + struct gpu_buddy_block *block) 94 104 { 95 - unsigned int order = drm_buddy_block_order(block); 96 - enum drm_buddy_free_tree tree; 105 + unsigned int order = gpu_buddy_block_order(block); 106 + enum gpu_buddy_free_tree tree; 97 107 struct rb_root *root; 98 108 99 109 tree = get_block_tree(block); ··· 103 113 RB_CLEAR_NODE(&block->rb); 104 114 } 105 115 106 - static void clear_reset(struct drm_buddy_block *block) 116 + static void clear_reset(struct gpu_buddy_block *block) 107 117 { 108 - block->header &= ~DRM_BUDDY_HEADER_CLEAR; 118 + block->header &= ~GPU_BUDDY_HEADER_CLEAR; 109 119 } 110 120 111 - static void mark_cleared(struct drm_buddy_block *block) 121 + static void mark_cleared(struct gpu_buddy_block *block) 112 122 { 113 - block->header |= DRM_BUDDY_HEADER_CLEAR; 123 + block->header |= GPU_BUDDY_HEADER_CLEAR; 114 124 } 115 125 116 - static void mark_allocated(struct drm_buddy *mm, 117 - struct drm_buddy_block *block) 126 + static void mark_allocated(struct gpu_buddy *mm, 127 + struct gpu_buddy_block *block) 118 128 { 119 - block->header &= ~DRM_BUDDY_HEADER_STATE; 120 - block->header |= DRM_BUDDY_ALLOCATED; 129 + block->header &= ~GPU_BUDDY_HEADER_STATE; 130 + block->header |= GPU_BUDDY_ALLOCATED; 121 131 122 132 rbtree_remove(mm, block); 123 133 } 124 134 125 - static void mark_free(struct drm_buddy *mm, 126 - struct drm_buddy_block *block) 135 + static void mark_free(struct gpu_buddy *mm, 136 + struct gpu_buddy_block *block) 127 137 { 128 - enum drm_buddy_free_tree tree; 138 + enum gpu_buddy_free_tree tree; 129 139 130 - block->header &= ~DRM_BUDDY_HEADER_STATE; 131 - block->header |= DRM_BUDDY_FREE; 140 + block->header &= ~GPU_BUDDY_HEADER_STATE; 141 + block->header |= GPU_BUDDY_FREE; 132 142 133 143 tree = get_block_tree(block); 134 144 rbtree_insert(mm, block, tree); 135 145 } 136 146 137 - static void mark_split(struct drm_buddy *mm, 138 - struct drm_buddy_block *block) 147 + static void 
mark_split(struct gpu_buddy *mm, 148 + struct gpu_buddy_block *block) 139 149 { 140 - block->header &= ~DRM_BUDDY_HEADER_STATE; 141 - block->header |= DRM_BUDDY_SPLIT; 150 + block->header &= ~GPU_BUDDY_HEADER_STATE; 151 + block->header |= GPU_BUDDY_SPLIT; 142 152 143 153 rbtree_remove(mm, block); 144 154 } ··· 153 163 return s1 <= s2 && e1 >= e2; 154 164 } 155 165 156 - static struct drm_buddy_block * 157 - __get_buddy(struct drm_buddy_block *block) 166 + static struct gpu_buddy_block * 167 + __get_buddy(struct gpu_buddy_block *block) 158 168 { 159 - struct drm_buddy_block *parent; 169 + struct gpu_buddy_block *parent; 160 170 161 171 parent = block->parent; 162 172 if (!parent) ··· 168 178 return parent->left; 169 179 } 170 180 171 - static unsigned int __drm_buddy_free(struct drm_buddy *mm, 172 - struct drm_buddy_block *block, 181 + static unsigned int __gpu_buddy_free(struct gpu_buddy *mm, 182 + struct gpu_buddy_block *block, 173 183 bool force_merge) 174 184 { 175 - struct drm_buddy_block *parent; 185 + struct gpu_buddy_block *parent; 176 186 unsigned int order; 177 187 178 188 while ((parent = block->parent)) { 179 - struct drm_buddy_block *buddy; 189 + struct gpu_buddy_block *buddy; 180 190 181 191 buddy = __get_buddy(block); 182 192 183 - if (!drm_buddy_block_is_free(buddy)) 193 + if (!gpu_buddy_block_is_free(buddy)) 184 194 break; 185 195 186 196 if (!force_merge) { ··· 188 198 * Check the block and its buddy clear state and exit 189 199 * the loop if they both have the dissimilar state. 190 200 */ 191 - if (drm_buddy_block_is_clear(block) != 192 - drm_buddy_block_is_clear(buddy)) 201 + if (gpu_buddy_block_is_clear(block) != 202 + gpu_buddy_block_is_clear(buddy)) 193 203 break; 194 204 195 - if (drm_buddy_block_is_clear(block)) 205 + if (gpu_buddy_block_is_clear(block)) 196 206 mark_cleared(parent); 197 207 } 198 208 199 209 rbtree_remove(mm, buddy); 200 - if (force_merge && drm_buddy_block_is_clear(buddy)) 201 - mm->clear_avail -= drm_buddy_block_size(mm, buddy); 210 + if (force_merge && gpu_buddy_block_is_clear(buddy)) 211 + mm->clear_avail -= gpu_buddy_block_size(mm, buddy); 202 212 203 - drm_block_free(mm, block); 204 - drm_block_free(mm, buddy); 213 + gpu_block_free(mm, block); 214 + gpu_block_free(mm, buddy); 205 215 206 216 block = parent; 207 217 } 208 218 209 - order = drm_buddy_block_order(block); 219 + order = gpu_buddy_block_order(block); 210 220 mark_free(mm, block); 211 221 212 222 return order; 213 223 } 214 224 215 - static int __force_merge(struct drm_buddy *mm, 225 + static int __force_merge(struct gpu_buddy *mm, 216 226 u64 start, 217 227 u64 end, 218 228 unsigned int min_order) ··· 231 241 struct rb_node *iter = rb_last(&mm->free_trees[tree][i]); 232 242 233 243 while (iter) { 234 - struct drm_buddy_block *block, *buddy; 244 + struct gpu_buddy_block *block, *buddy; 235 245 u64 block_start, block_end; 236 246 237 247 block = rbtree_get_free_block(iter); ··· 240 250 if (!block || !block->parent) 241 251 continue; 242 252 243 - block_start = drm_buddy_block_offset(block); 244 - block_end = block_start + drm_buddy_block_size(mm, block) - 1; 253 + block_start = gpu_buddy_block_offset(block); 254 + block_end = block_start + gpu_buddy_block_size(mm, block) - 1; 245 255 246 256 if (!contains(start, end, block_start, block_end)) 247 257 continue; 248 258 249 259 buddy = __get_buddy(block); 250 - if (!drm_buddy_block_is_free(buddy)) 260 + if (!gpu_buddy_block_is_free(buddy)) 251 261 continue; 252 262 253 - WARN_ON(drm_buddy_block_is_clear(block) == 254 - 
drm_buddy_block_is_clear(buddy)); 263 + WARN_ON(gpu_buddy_block_is_clear(block) == 264 + gpu_buddy_block_is_clear(buddy)); 255 265 256 266 /* 257 267 * Advance to the next node when the current node is the buddy, ··· 261 271 iter = rb_prev(iter); 262 272 263 273 rbtree_remove(mm, block); 264 - if (drm_buddy_block_is_clear(block)) 265 - mm->clear_avail -= drm_buddy_block_size(mm, block); 274 + if (gpu_buddy_block_is_clear(block)) 275 + mm->clear_avail -= gpu_buddy_block_size(mm, block); 266 276 267 - order = __drm_buddy_free(mm, block, true); 277 + order = __gpu_buddy_free(mm, block, true); 268 278 if (order >= min_order) 269 279 return 0; 270 280 } ··· 275 285 } 276 286 277 287 /** 278 - * drm_buddy_init - init memory manager 288 + * gpu_buddy_init - init memory manager 279 289 * 280 - * @mm: DRM buddy manager to initialize 290 + * @mm: GPU buddy manager to initialize 281 291 * @size: size in bytes to manage 282 292 * @chunk_size: minimum page size in bytes for our allocations 283 293 * ··· 286 296 * Returns: 287 297 * 0 on success, error code on failure. 288 298 */ 289 - int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size) 299 + int gpu_buddy_init(struct gpu_buddy *mm, u64 size, u64 chunk_size) 290 300 { 291 301 unsigned int i, j, root_count = 0; 292 302 u64 offset = 0; ··· 308 318 mm->chunk_size = chunk_size; 309 319 mm->max_order = ilog2(size) - ilog2(chunk_size); 310 320 311 - BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER); 321 + BUG_ON(mm->max_order > GPU_BUDDY_MAX_ORDER); 312 322 313 - mm->free_trees = kmalloc_array(DRM_BUDDY_MAX_FREE_TREES, 323 + mm->free_trees = kmalloc_array(GPU_BUDDY_MAX_FREE_TREES, 314 324 sizeof(*mm->free_trees), 315 325 GFP_KERNEL); 316 326 if (!mm->free_trees) ··· 330 340 mm->n_roots = hweight64(size); 331 341 332 342 mm->roots = kmalloc_array(mm->n_roots, 333 - sizeof(struct drm_buddy_block *), 343 + sizeof(struct gpu_buddy_block *), 334 344 GFP_KERNEL); 335 345 if (!mm->roots) 336 346 goto out_free_tree; ··· 340 350 * not itself a power-of-two. 
341 351 */ 342 352 do { 343 - struct drm_buddy_block *root; 353 + struct gpu_buddy_block *root; 344 354 unsigned int order; 345 355 u64 root_size; 346 356 347 357 order = ilog2(size) - ilog2(chunk_size); 348 358 root_size = chunk_size << order; 349 359 350 - root = drm_block_alloc(mm, NULL, order, offset); 360 + root = gpu_block_alloc(mm, NULL, order, offset); 351 361 if (!root) 352 362 goto out_free_roots; 353 363 354 364 mark_free(mm, root); 355 365 356 366 BUG_ON(root_count > mm->max_order); 357 - BUG_ON(drm_buddy_block_size(mm, root) < chunk_size); 367 + BUG_ON(gpu_buddy_block_size(mm, root) < chunk_size); 358 368 359 369 mm->roots[root_count] = root; 360 370 ··· 367 377 368 378 out_free_roots: 369 379 while (root_count--) 370 - drm_block_free(mm, mm->roots[root_count]); 380 + gpu_block_free(mm, mm->roots[root_count]); 371 381 kfree(mm->roots); 372 382 out_free_tree: 373 383 while (i--) ··· 375 385 kfree(mm->free_trees); 376 386 return -ENOMEM; 377 387 } 378 - EXPORT_SYMBOL(drm_buddy_init); 388 + EXPORT_SYMBOL(gpu_buddy_init); 379 389 380 390 /** 381 - * drm_buddy_fini - tear down the memory manager 391 + * gpu_buddy_fini - tear down the memory manager 382 392 * 383 - * @mm: DRM buddy manager to free 393 + * @mm: GPU buddy manager to free 384 394 * 385 395 * Cleanup memory manager resources and the freetree 386 396 */ 387 - void drm_buddy_fini(struct drm_buddy *mm) 397 + void gpu_buddy_fini(struct gpu_buddy *mm) 388 398 { 389 399 u64 root_size, size, start; 390 400 unsigned int order; ··· 394 404 395 405 for (i = 0; i < mm->n_roots; ++i) { 396 406 order = ilog2(size) - ilog2(mm->chunk_size); 397 - start = drm_buddy_block_offset(mm->roots[i]); 407 + start = gpu_buddy_block_offset(mm->roots[i]); 398 408 __force_merge(mm, start, start + size, order); 399 409 400 - if (WARN_ON(!drm_buddy_block_is_free(mm->roots[i]))) 410 + if (WARN_ON(!gpu_buddy_block_is_free(mm->roots[i]))) 401 411 kunit_fail_current_test("buddy_fini() root"); 402 412 403 - drm_block_free(mm, mm->roots[i]); 413 + gpu_block_free(mm, mm->roots[i]); 404 414 405 415 root_size = mm->chunk_size << order; 406 416 size -= root_size; ··· 413 423 kfree(mm->free_trees); 414 424 kfree(mm->roots); 415 425 } 416 - EXPORT_SYMBOL(drm_buddy_fini); 426 + EXPORT_SYMBOL(gpu_buddy_fini); 417 427 418 - static int split_block(struct drm_buddy *mm, 419 - struct drm_buddy_block *block) 428 + static int split_block(struct gpu_buddy *mm, 429 + struct gpu_buddy_block *block) 420 430 { 421 - unsigned int block_order = drm_buddy_block_order(block) - 1; 422 - u64 offset = drm_buddy_block_offset(block); 431 + unsigned int block_order = gpu_buddy_block_order(block) - 1; 432 + u64 offset = gpu_buddy_block_offset(block); 423 433 424 - BUG_ON(!drm_buddy_block_is_free(block)); 425 - BUG_ON(!drm_buddy_block_order(block)); 434 + BUG_ON(!gpu_buddy_block_is_free(block)); 435 + BUG_ON(!gpu_buddy_block_order(block)); 426 436 427 - block->left = drm_block_alloc(mm, block, block_order, offset); 437 + block->left = gpu_block_alloc(mm, block, block_order, offset); 428 438 if (!block->left) 429 439 return -ENOMEM; 430 440 431 - block->right = drm_block_alloc(mm, block, block_order, 441 + block->right = gpu_block_alloc(mm, block, block_order, 432 442 offset + (mm->chunk_size << block_order)); 433 443 if (!block->right) { 434 - drm_block_free(mm, block->left); 444 + gpu_block_free(mm, block->left); 435 445 return -ENOMEM; 436 446 } 437 447 438 448 mark_split(mm, block); 439 449 440 - if (drm_buddy_block_is_clear(block)) { 450 + if (gpu_buddy_block_is_clear(block)) { 441 
451 mark_cleared(block->left); 442 452 mark_cleared(block->right); 443 453 clear_reset(block); ··· 450 460 } 451 461 452 462 /** 453 - * drm_get_buddy - get buddy address 463 + * gpu_get_buddy - get buddy address 454 464 * 455 - * @block: DRM buddy block 465 + * @block: GPU buddy block 456 466 * 457 467 * Returns the corresponding buddy block for @block, or NULL 458 468 * if this is a root block and can't be merged further. 459 469 * Requires some kind of locking to protect against 460 470 * any concurrent allocate and free operations. 461 471 */ 462 - struct drm_buddy_block * 463 - drm_get_buddy(struct drm_buddy_block *block) 472 + struct gpu_buddy_block * 473 + gpu_get_buddy(struct gpu_buddy_block *block) 464 474 { 465 475 return __get_buddy(block); 466 476 } 467 - EXPORT_SYMBOL(drm_get_buddy); 477 + EXPORT_SYMBOL(gpu_get_buddy); 468 478 469 479 /** 470 - * drm_buddy_reset_clear - reset blocks clear state 480 + * gpu_buddy_reset_clear - reset blocks clear state 471 481 * 472 - * @mm: DRM buddy manager 482 + * @mm: GPU buddy manager 473 483 * @is_clear: blocks clear state 474 484 * 475 485 * Reset the clear state based on @is_clear value for each block 476 486 * in the freetree. 477 487 */ 478 - void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear) 488 + void gpu_buddy_reset_clear(struct gpu_buddy *mm, bool is_clear) 479 489 { 480 - enum drm_buddy_free_tree src_tree, dst_tree; 490 + enum gpu_buddy_free_tree src_tree, dst_tree; 481 491 u64 root_size, size, start; 482 492 unsigned int order; 483 493 int i; ··· 485 495 size = mm->size; 486 496 for (i = 0; i < mm->n_roots; ++i) { 487 497 order = ilog2(size) - ilog2(mm->chunk_size); 488 - start = drm_buddy_block_offset(mm->roots[i]); 498 + start = gpu_buddy_block_offset(mm->roots[i]); 489 499 __force_merge(mm, start, start + size, order); 490 500 491 501 root_size = mm->chunk_size << order; 492 502 size -= root_size; 493 503 } 494 504 495 - src_tree = is_clear ? DRM_BUDDY_DIRTY_TREE : DRM_BUDDY_CLEAR_TREE; 496 - dst_tree = is_clear ? DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE; 505 + src_tree = is_clear ? GPU_BUDDY_DIRTY_TREE : GPU_BUDDY_CLEAR_TREE; 506 + dst_tree = is_clear ? 
GPU_BUDDY_CLEAR_TREE : GPU_BUDDY_DIRTY_TREE; 497 507 498 508 for (i = 0; i <= mm->max_order; ++i) { 499 509 struct rb_root *root = &mm->free_trees[src_tree][i]; 500 - struct drm_buddy_block *block, *tmp; 510 + struct gpu_buddy_block *block, *tmp; 501 511 502 512 rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) { 503 513 rbtree_remove(mm, block); 504 514 if (is_clear) { 505 515 mark_cleared(block); 506 - mm->clear_avail += drm_buddy_block_size(mm, block); 516 + mm->clear_avail += gpu_buddy_block_size(mm, block); 507 517 } else { 508 518 clear_reset(block); 509 - mm->clear_avail -= drm_buddy_block_size(mm, block); 519 + mm->clear_avail -= gpu_buddy_block_size(mm, block); 510 520 } 511 521 512 522 rbtree_insert(mm, block, dst_tree); 513 523 } 514 524 } 515 525 } 516 - EXPORT_SYMBOL(drm_buddy_reset_clear); 526 + EXPORT_SYMBOL(gpu_buddy_reset_clear); 517 527 518 528 /** 519 - * drm_buddy_free_block - free a block 529 + * gpu_buddy_free_block - free a block 520 530 * 521 - * @mm: DRM buddy manager 531 + * @mm: GPU buddy manager 522 532 * @block: block to be freed 523 533 */ 524 - void drm_buddy_free_block(struct drm_buddy *mm, 525 - struct drm_buddy_block *block) 534 + void gpu_buddy_free_block(struct gpu_buddy *mm, 535 + struct gpu_buddy_block *block) 526 536 { 527 - BUG_ON(!drm_buddy_block_is_allocated(block)); 528 - mm->avail += drm_buddy_block_size(mm, block); 529 - if (drm_buddy_block_is_clear(block)) 530 - mm->clear_avail += drm_buddy_block_size(mm, block); 537 + BUG_ON(!gpu_buddy_block_is_allocated(block)); 538 + mm->avail += gpu_buddy_block_size(mm, block); 539 + if (gpu_buddy_block_is_clear(block)) 540 + mm->clear_avail += gpu_buddy_block_size(mm, block); 531 541 532 - __drm_buddy_free(mm, block, false); 542 + __gpu_buddy_free(mm, block, false); 533 543 } 534 - EXPORT_SYMBOL(drm_buddy_free_block); 544 + EXPORT_SYMBOL(gpu_buddy_free_block); 535 545 536 - static void __drm_buddy_free_list(struct drm_buddy *mm, 546 + static void __gpu_buddy_free_list(struct gpu_buddy *mm, 537 547 struct list_head *objects, 538 548 bool mark_clear, 539 549 bool mark_dirty) 540 550 { 541 - struct drm_buddy_block *block, *on; 551 + struct gpu_buddy_block *block, *on; 542 552 543 553 WARN_ON(mark_dirty && mark_clear); 544 554 ··· 547 557 mark_cleared(block); 548 558 else if (mark_dirty) 549 559 clear_reset(block); 550 - drm_buddy_free_block(mm, block); 560 + gpu_buddy_free_block(mm, block); 551 561 cond_resched(); 552 562 } 553 563 INIT_LIST_HEAD(objects); 554 564 } 555 565 556 - static void drm_buddy_free_list_internal(struct drm_buddy *mm, 566 + static void gpu_buddy_free_list_internal(struct gpu_buddy *mm, 557 567 struct list_head *objects) 558 568 { 559 569 /* ··· 561 571 * at this point. For example we might have just failed part of the 562 572 * allocation. 
563 573 */ 564 - __drm_buddy_free_list(mm, objects, false, false); 574 + __gpu_buddy_free_list(mm, objects, false, false); 565 575 } 566 576 567 577 /** 568 - * drm_buddy_free_list - free blocks 578 + * gpu_buddy_free_list - free blocks 569 579 * 570 - * @mm: DRM buddy manager 580 + * @mm: GPU buddy manager 571 581 * @objects: input list head to free blocks 572 - * @flags: optional flags like DRM_BUDDY_CLEARED 582 + * @flags: optional flags like GPU_BUDDY_CLEARED 573 583 */ 574 - void drm_buddy_free_list(struct drm_buddy *mm, 584 + void gpu_buddy_free_list(struct gpu_buddy *mm, 575 585 struct list_head *objects, 576 586 unsigned int flags) 577 587 { 578 - bool mark_clear = flags & DRM_BUDDY_CLEARED; 588 + bool mark_clear = flags & GPU_BUDDY_CLEARED; 579 589 580 - __drm_buddy_free_list(mm, objects, mark_clear, !mark_clear); 590 + __gpu_buddy_free_list(mm, objects, mark_clear, !mark_clear); 581 591 } 582 - EXPORT_SYMBOL(drm_buddy_free_list); 592 + EXPORT_SYMBOL(gpu_buddy_free_list); 583 593 584 - static bool block_incompatible(struct drm_buddy_block *block, unsigned int flags) 594 + static bool block_incompatible(struct gpu_buddy_block *block, unsigned int flags) 585 595 { 586 - bool needs_clear = flags & DRM_BUDDY_CLEAR_ALLOCATION; 596 + bool needs_clear = flags & GPU_BUDDY_CLEAR_ALLOCATION; 587 597 588 - return needs_clear != drm_buddy_block_is_clear(block); 598 + return needs_clear != gpu_buddy_block_is_clear(block); 589 599 } 590 600 591 - static struct drm_buddy_block * 592 - __alloc_range_bias(struct drm_buddy *mm, 601 + static struct gpu_buddy_block * 602 + __alloc_range_bias(struct gpu_buddy *mm, 593 603 u64 start, u64 end, 594 604 unsigned int order, 595 605 unsigned long flags, 596 606 bool fallback) 597 607 { 598 608 u64 req_size = mm->chunk_size << order; 599 - struct drm_buddy_block *block; 600 - struct drm_buddy_block *buddy; 609 + struct gpu_buddy_block *block; 610 + struct gpu_buddy_block *buddy; 601 611 LIST_HEAD(dfs); 602 612 int err; 603 613 int i; ··· 612 622 u64 block_end; 613 623 614 624 block = list_first_entry_or_null(&dfs, 615 - struct drm_buddy_block, 625 + struct gpu_buddy_block, 616 626 tmp_link); 617 627 if (!block) 618 628 break; 619 629 620 630 list_del(&block->tmp_link); 621 631 622 - if (drm_buddy_block_order(block) < order) 632 + if (gpu_buddy_block_order(block) < order) 623 633 continue; 624 634 625 - block_start = drm_buddy_block_offset(block); 626 - block_end = block_start + drm_buddy_block_size(mm, block) - 1; 635 + block_start = gpu_buddy_block_offset(block); 636 + block_end = block_start + gpu_buddy_block_size(mm, block) - 1; 627 637 628 638 if (!overlaps(start, end, block_start, block_end)) 629 639 continue; 630 640 631 - if (drm_buddy_block_is_allocated(block)) 641 + if (gpu_buddy_block_is_allocated(block)) 632 642 continue; 633 643 634 644 if (block_start < start || block_end > end) { ··· 644 654 continue; 645 655 646 656 if (contains(start, end, block_start, block_end) && 647 - order == drm_buddy_block_order(block)) { 657 + order == gpu_buddy_block_order(block)) { 648 658 /* 649 659 * Find the free block within the range. 
650 660 */ 651 - if (drm_buddy_block_is_free(block)) 661 + if (gpu_buddy_block_is_free(block)) 652 662 return block; 653 663 654 664 continue; 655 665 } 656 666 657 - if (!drm_buddy_block_is_split(block)) { 667 + if (!gpu_buddy_block_is_split(block)) { 658 668 err = split_block(mm, block); 659 669 if (unlikely(err)) 660 670 goto err_undo; ··· 674 684 */ 675 685 buddy = __get_buddy(block); 676 686 if (buddy && 677 - (drm_buddy_block_is_free(block) && 678 - drm_buddy_block_is_free(buddy))) 679 - __drm_buddy_free(mm, block, false); 687 + (gpu_buddy_block_is_free(block) && 688 + gpu_buddy_block_is_free(buddy))) 689 + __gpu_buddy_free(mm, block, false); 680 690 return ERR_PTR(err); 681 691 } 682 692 683 - static struct drm_buddy_block * 684 - __drm_buddy_alloc_range_bias(struct drm_buddy *mm, 693 + static struct gpu_buddy_block * 694 + __gpu_buddy_alloc_range_bias(struct gpu_buddy *mm, 685 695 u64 start, u64 end, 686 696 unsigned int order, 687 697 unsigned long flags) 688 698 { 689 - struct drm_buddy_block *block; 699 + struct gpu_buddy_block *block; 690 700 bool fallback = false; 691 701 692 702 block = __alloc_range_bias(mm, start, end, order, ··· 698 708 return block; 699 709 } 700 710 701 - static struct drm_buddy_block * 702 - get_maxblock(struct drm_buddy *mm, 711 + static struct gpu_buddy_block * 712 + get_maxblock(struct gpu_buddy *mm, 703 713 unsigned int order, 704 - enum drm_buddy_free_tree tree) 714 + enum gpu_buddy_free_tree tree) 705 715 { 706 - struct drm_buddy_block *max_block = NULL, *block = NULL; 716 + struct gpu_buddy_block *max_block = NULL, *block = NULL; 707 717 struct rb_root *root; 708 718 unsigned int i; 709 719 ··· 718 728 continue; 719 729 } 720 730 721 - if (drm_buddy_block_offset(block) > 722 - drm_buddy_block_offset(max_block)) { 731 + if (gpu_buddy_block_offset(block) > 732 + gpu_buddy_block_offset(max_block)) { 723 733 max_block = block; 724 734 } 725 735 } ··· 727 737 return max_block; 728 738 } 729 739 730 - static struct drm_buddy_block * 731 - alloc_from_freetree(struct drm_buddy *mm, 740 + static struct gpu_buddy_block * 741 + alloc_from_freetree(struct gpu_buddy *mm, 732 742 unsigned int order, 733 743 unsigned long flags) 734 744 { 735 - struct drm_buddy_block *block = NULL; 745 + struct gpu_buddy_block *block = NULL; 736 746 struct rb_root *root; 737 - enum drm_buddy_free_tree tree; 747 + enum gpu_buddy_free_tree tree; 738 748 unsigned int tmp; 739 749 int err; 740 750 741 - tree = (flags & DRM_BUDDY_CLEAR_ALLOCATION) ? 742 - DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE; 751 + tree = (flags & GPU_BUDDY_CLEAR_ALLOCATION) ? 752 + GPU_BUDDY_CLEAR_TREE : GPU_BUDDY_DIRTY_TREE; 743 753 744 - if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) { 754 + if (flags & GPU_BUDDY_TOPDOWN_ALLOCATION) { 745 755 block = get_maxblock(mm, order, tree); 746 756 if (block) 747 757 /* Store the obtained block order */ 748 - tmp = drm_buddy_block_order(block); 758 + tmp = gpu_buddy_block_order(block); 749 759 } else { 750 760 for (tmp = order; tmp <= mm->max_order; ++tmp) { 751 761 /* Get RB tree root for this order and tree */ ··· 758 768 759 769 if (!block) { 760 770 /* Try allocating from the other tree */ 761 - tree = (tree == DRM_BUDDY_CLEAR_TREE) ? 762 - DRM_BUDDY_DIRTY_TREE : DRM_BUDDY_CLEAR_TREE; 771 + tree = (tree == GPU_BUDDY_CLEAR_TREE) ? 
772 + GPU_BUDDY_DIRTY_TREE : GPU_BUDDY_CLEAR_TREE; 763 773 764 774 for (tmp = order; tmp <= mm->max_order; ++tmp) { 765 775 root = &mm->free_trees[tree][tmp]; ··· 772 782 return ERR_PTR(-ENOSPC); 773 783 } 774 784 775 - BUG_ON(!drm_buddy_block_is_free(block)); 785 + BUG_ON(!gpu_buddy_block_is_free(block)); 776 786 777 787 while (tmp != order) { 778 788 err = split_block(mm, block); ··· 786 796 787 797 err_undo: 788 798 if (tmp != order) 789 - __drm_buddy_free(mm, block, false); 799 + __gpu_buddy_free(mm, block, false); 790 800 return ERR_PTR(err); 791 801 } 792 802 793 - static int __alloc_range(struct drm_buddy *mm, 803 + static int __alloc_range(struct gpu_buddy *mm, 794 804 struct list_head *dfs, 795 805 u64 start, u64 size, 796 806 struct list_head *blocks, 797 807 u64 *total_allocated_on_err) 798 808 { 799 - struct drm_buddy_block *block; 800 - struct drm_buddy_block *buddy; 809 + struct gpu_buddy_block *block; 810 + struct gpu_buddy_block *buddy; 801 811 u64 total_allocated = 0; 802 812 LIST_HEAD(allocated); 803 813 u64 end; ··· 810 820 u64 block_end; 811 821 812 822 block = list_first_entry_or_null(dfs, 813 - struct drm_buddy_block, 823 + struct gpu_buddy_block, 814 824 tmp_link); 815 825 if (!block) 816 826 break; 817 827 818 828 list_del(&block->tmp_link); 819 829 820 - block_start = drm_buddy_block_offset(block); 821 - block_end = block_start + drm_buddy_block_size(mm, block) - 1; 830 + block_start = gpu_buddy_block_offset(block); 831 + block_end = block_start + gpu_buddy_block_size(mm, block) - 1; 822 832 823 833 if (!overlaps(start, end, block_start, block_end)) 824 834 continue; 825 835 826 - if (drm_buddy_block_is_allocated(block)) { 836 + if (gpu_buddy_block_is_allocated(block)) { 827 837 err = -ENOSPC; 828 838 goto err_free; 829 839 } 830 840 831 841 if (contains(start, end, block_start, block_end)) { 832 - if (drm_buddy_block_is_free(block)) { 842 + if (gpu_buddy_block_is_free(block)) { 833 843 mark_allocated(mm, block); 834 - total_allocated += drm_buddy_block_size(mm, block); 835 - mm->avail -= drm_buddy_block_size(mm, block); 836 - if (drm_buddy_block_is_clear(block)) 837 - mm->clear_avail -= drm_buddy_block_size(mm, block); 844 + total_allocated += gpu_buddy_block_size(mm, block); 845 + mm->avail -= gpu_buddy_block_size(mm, block); 846 + if (gpu_buddy_block_is_clear(block)) 847 + mm->clear_avail -= gpu_buddy_block_size(mm, block); 838 848 list_add_tail(&block->link, &allocated); 839 849 continue; 840 850 } else if (!mm->clear_avail) { ··· 843 853 } 844 854 } 845 855 846 - if (!drm_buddy_block_is_split(block)) { 856 + if (!gpu_buddy_block_is_split(block)) { 847 857 err = split_block(mm, block); 848 858 if (unlikely(err)) 849 859 goto err_undo; ··· 870 880 */ 871 881 buddy = __get_buddy(block); 872 882 if (buddy && 873 - (drm_buddy_block_is_free(block) && 874 - drm_buddy_block_is_free(buddy))) 875 - __drm_buddy_free(mm, block, false); 883 + (gpu_buddy_block_is_free(block) && 884 + gpu_buddy_block_is_free(buddy))) 885 + __gpu_buddy_free(mm, block, false); 876 886 877 887 err_free: 878 888 if (err == -ENOSPC && total_allocated_on_err) { 879 889 list_splice_tail(&allocated, blocks); 880 890 *total_allocated_on_err = total_allocated; 881 891 } else { 882 - drm_buddy_free_list_internal(mm, &allocated); 892 + gpu_buddy_free_list_internal(mm, &allocated); 883 893 } 884 894 885 895 return err; 886 896 } 887 897 888 - static int __drm_buddy_alloc_range(struct drm_buddy *mm, 898 + static int __gpu_buddy_alloc_range(struct gpu_buddy *mm, 889 899 u64 start, 890 900 u64 size, 891 901 
u64 *total_allocated_on_err, ··· 901 911 blocks, total_allocated_on_err); 902 912 } 903 913 904 - static int __alloc_contig_try_harder(struct drm_buddy *mm, 914 + static int __alloc_contig_try_harder(struct gpu_buddy *mm, 905 915 u64 size, 906 916 u64 min_block_size, 907 917 struct list_head *blocks) 908 918 { 909 919 u64 rhs_offset, lhs_offset, lhs_size, filled; 910 - struct drm_buddy_block *block; 920 + struct gpu_buddy_block *block; 911 921 unsigned int tree, order; 912 922 LIST_HEAD(blocks_lhs); 913 923 unsigned long pages; ··· 933 943 block = rbtree_get_free_block(iter); 934 944 935 945 /* Allocate blocks traversing RHS */ 936 - rhs_offset = drm_buddy_block_offset(block); 937 - err = __drm_buddy_alloc_range(mm, rhs_offset, size, 946 + rhs_offset = gpu_buddy_block_offset(block); 947 + err = __gpu_buddy_alloc_range(mm, rhs_offset, size, 938 948 &filled, blocks); 939 949 if (!err || err != -ENOSPC) 940 950 return err; ··· 944 954 lhs_size = round_up(lhs_size, min_block_size); 945 955 946 956 /* Allocate blocks traversing LHS */ 947 - lhs_offset = drm_buddy_block_offset(block) - lhs_size; 948 - err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size, 957 + lhs_offset = gpu_buddy_block_offset(block) - lhs_size; 958 + err = __gpu_buddy_alloc_range(mm, lhs_offset, lhs_size, 949 959 NULL, &blocks_lhs); 950 960 if (!err) { 951 961 list_splice(&blocks_lhs, blocks); 952 962 return 0; 953 963 } else if (err != -ENOSPC) { 954 - drm_buddy_free_list_internal(mm, blocks); 964 + gpu_buddy_free_list_internal(mm, blocks); 955 965 return err; 956 966 } 957 967 /* Free blocks for the next iteration */ 958 - drm_buddy_free_list_internal(mm, blocks); 968 + gpu_buddy_free_list_internal(mm, blocks); 959 969 960 970 iter = rb_prev(iter); 961 971 } ··· 965 975 } 966 976 967 977 /** 968 - * drm_buddy_block_trim - free unused pages 978 + * gpu_buddy_block_trim - free unused pages 969 979 * 970 - * @mm: DRM buddy manager 980 + * @mm: GPU buddy manager 971 981 * @start: start address to begin the trimming. 972 982 * @new_size: original size requested 973 983 * @blocks: Input and output list of allocated blocks. ··· 983 993 * Returns: 984 994 * 0 on success, error code on failure. 
985 995 */ 986 - int drm_buddy_block_trim(struct drm_buddy *mm, 996 + int gpu_buddy_block_trim(struct gpu_buddy *mm, 987 997 u64 *start, 988 998 u64 new_size, 989 999 struct list_head *blocks) 990 1000 { 991 - struct drm_buddy_block *parent; 992 - struct drm_buddy_block *block; 1001 + struct gpu_buddy_block *parent; 1002 + struct gpu_buddy_block *block; 993 1003 u64 block_start, block_end; 994 1004 LIST_HEAD(dfs); 995 1005 u64 new_start; ··· 999 1009 return -EINVAL; 1000 1010 1001 1011 block = list_first_entry(blocks, 1002 - struct drm_buddy_block, 1012 + struct gpu_buddy_block, 1003 1013 link); 1004 1014 1005 - block_start = drm_buddy_block_offset(block); 1006 - block_end = block_start + drm_buddy_block_size(mm, block); 1015 + block_start = gpu_buddy_block_offset(block); 1016 + block_end = block_start + gpu_buddy_block_size(mm, block); 1007 1017 1008 - if (WARN_ON(!drm_buddy_block_is_allocated(block))) 1018 + if (WARN_ON(!gpu_buddy_block_is_allocated(block))) 1009 1019 return -EINVAL; 1010 1020 1011 - if (new_size > drm_buddy_block_size(mm, block)) 1021 + if (new_size > gpu_buddy_block_size(mm, block)) 1012 1022 return -EINVAL; 1013 1023 1014 1024 if (!new_size || !IS_ALIGNED(new_size, mm->chunk_size)) 1015 1025 return -EINVAL; 1016 1026 1017 - if (new_size == drm_buddy_block_size(mm, block)) 1027 + if (new_size == gpu_buddy_block_size(mm, block)) 1018 1028 return 0; 1019 1029 1020 1030 new_start = block_start; ··· 1033 1043 1034 1044 list_del(&block->link); 1035 1045 mark_free(mm, block); 1036 - mm->avail += drm_buddy_block_size(mm, block); 1037 - if (drm_buddy_block_is_clear(block)) 1038 - mm->clear_avail += drm_buddy_block_size(mm, block); 1046 + mm->avail += gpu_buddy_block_size(mm, block); 1047 + if (gpu_buddy_block_is_clear(block)) 1048 + mm->clear_avail += gpu_buddy_block_size(mm, block); 1039 1049 1040 1050 /* Prevent recursively freeing this node */ 1041 1051 parent = block->parent; ··· 1045 1055 err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL); 1046 1056 if (err) { 1047 1057 mark_allocated(mm, block); 1048 - mm->avail -= drm_buddy_block_size(mm, block); 1049 - if (drm_buddy_block_is_clear(block)) 1050 - mm->clear_avail -= drm_buddy_block_size(mm, block); 1058 + mm->avail -= gpu_buddy_block_size(mm, block); 1059 + if (gpu_buddy_block_is_clear(block)) 1060 + mm->clear_avail -= gpu_buddy_block_size(mm, block); 1051 1061 list_add(&block->link, blocks); 1052 1062 } 1053 1063 1054 1064 block->parent = parent; 1055 1065 return err; 1056 1066 } 1057 - EXPORT_SYMBOL(drm_buddy_block_trim); 1067 + EXPORT_SYMBOL(gpu_buddy_block_trim); 1058 1068 1059 - static struct drm_buddy_block * 1060 - __drm_buddy_alloc_blocks(struct drm_buddy *mm, 1069 + static struct gpu_buddy_block * 1070 + __gpu_buddy_alloc_blocks(struct gpu_buddy *mm, 1061 1071 u64 start, u64 end, 1062 1072 unsigned int order, 1063 1073 unsigned long flags) 1064 1074 { 1065 - if (flags & DRM_BUDDY_RANGE_ALLOCATION) 1075 + if (flags & GPU_BUDDY_RANGE_ALLOCATION) 1066 1076 /* Allocate traversing within the range */ 1067 - return __drm_buddy_alloc_range_bias(mm, start, end, 1077 + return __gpu_buddy_alloc_range_bias(mm, start, end, 1068 1078 order, flags); 1069 1079 else 1070 1080 /* Allocate from freetree */ ··· 1072 1082 } 1073 1083 1074 1084 /** 1075 - * drm_buddy_alloc_blocks - allocate power-of-two blocks 1085 + * gpu_buddy_alloc_blocks - allocate power-of-two blocks 1076 1086 * 1077 - * @mm: DRM buddy manager to allocate from 1087 + * @mm: GPU buddy manager to allocate from 1078 1088 * @start: start of the 
allowed range for this block 1079 1089 * @end: end of the allowed range for this block 1080 1090 * @size: size of the allocation in bytes 1081 1091 * @min_block_size: alignment of the allocation 1082 1092 * @blocks: output list head to add allocated blocks 1083 - * @flags: DRM_BUDDY_*_ALLOCATION flags 1093 + * @flags: GPU_BUDDY_*_ALLOCATION flags 1084 1094 * 1085 1095 * alloc_range_bias() called on range limitations, which traverses 1086 1096 * the tree and returns the desired block. ··· 1091 1101 * Returns: 1092 1102 * 0 on success, error code on failure. 1093 1103 */ 1094 - int drm_buddy_alloc_blocks(struct drm_buddy *mm, 1104 + int gpu_buddy_alloc_blocks(struct gpu_buddy *mm, 1095 1105 u64 start, u64 end, u64 size, 1096 1106 u64 min_block_size, 1097 1107 struct list_head *blocks, 1098 1108 unsigned long flags) 1099 1109 { 1100 - struct drm_buddy_block *block = NULL; 1110 + struct gpu_buddy_block *block = NULL; 1101 1111 u64 original_size, original_min_size; 1102 1112 unsigned int min_order, order; 1103 1113 LIST_HEAD(allocated); ··· 1127 1137 if (!IS_ALIGNED(start | end, min_block_size)) 1128 1138 return -EINVAL; 1129 1139 1130 - return __drm_buddy_alloc_range(mm, start, size, NULL, blocks); 1140 + return __gpu_buddy_alloc_range(mm, start, size, NULL, blocks); 1131 1141 } 1132 1142 1133 1143 original_size = size; 1134 1144 original_min_size = min_block_size; 1135 1145 1136 1146 /* Roundup the size to power of 2 */ 1137 - if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) { 1147 + if (flags & GPU_BUDDY_CONTIGUOUS_ALLOCATION) { 1138 1148 size = roundup_pow_of_two(size); 1139 1149 min_block_size = size; 1140 1150 /* Align size value to min_block_size */ ··· 1147 1157 min_order = ilog2(min_block_size) - ilog2(mm->chunk_size); 1148 1158 1149 1159 if (order > mm->max_order || size > mm->size) { 1150 - if ((flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) && 1151 - !(flags & DRM_BUDDY_RANGE_ALLOCATION)) 1160 + if ((flags & GPU_BUDDY_CONTIGUOUS_ALLOCATION) && 1161 + !(flags & GPU_BUDDY_RANGE_ALLOCATION)) 1152 1162 return __alloc_contig_try_harder(mm, original_size, 1153 1163 original_min_size, blocks); 1154 1164 ··· 1161 1171 BUG_ON(order < min_order); 1162 1172 1163 1173 do { 1164 - block = __drm_buddy_alloc_blocks(mm, start, 1174 + block = __gpu_buddy_alloc_blocks(mm, start, 1165 1175 end, 1166 1176 order, 1167 1177 flags); ··· 1172 1182 /* Try allocation through force merge method */ 1173 1183 if (mm->clear_avail && 1174 1184 !__force_merge(mm, start, end, min_order)) { 1175 - block = __drm_buddy_alloc_blocks(mm, start, 1185 + block = __gpu_buddy_alloc_blocks(mm, start, 1176 1186 end, 1177 1187 min_order, 1178 1188 flags); ··· 1186 1196 * Try contiguous block allocation through 1187 1197 * try harder method. 
1188 1198 */ 1189 - if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION && 1190 - !(flags & DRM_BUDDY_RANGE_ALLOCATION)) 1199 + if (flags & GPU_BUDDY_CONTIGUOUS_ALLOCATION && 1200 + !(flags & GPU_BUDDY_RANGE_ALLOCATION)) 1191 1201 return __alloc_contig_try_harder(mm, 1192 1202 original_size, 1193 1203 original_min_size, ··· 1198 1208 } while (1); 1199 1209 1200 1210 mark_allocated(mm, block); 1201 - mm->avail -= drm_buddy_block_size(mm, block); 1202 - if (drm_buddy_block_is_clear(block)) 1203 - mm->clear_avail -= drm_buddy_block_size(mm, block); 1211 + mm->avail -= gpu_buddy_block_size(mm, block); 1212 + if (gpu_buddy_block_is_clear(block)) 1213 + mm->clear_avail -= gpu_buddy_block_size(mm, block); 1204 1214 kmemleak_update_trace(block); 1205 1215 list_add_tail(&block->link, &allocated); 1206 1216 ··· 1211 1221 } while (1); 1212 1222 1213 1223 /* Trim the allocated block to the required size */ 1214 - if (!(flags & DRM_BUDDY_TRIM_DISABLE) && 1224 + if (!(flags & GPU_BUDDY_TRIM_DISABLE) && 1215 1225 original_size != size) { 1216 1226 struct list_head *trim_list; 1217 1227 LIST_HEAD(temp); ··· 1224 1234 block = list_last_entry(&allocated, typeof(*block), link); 1225 1235 list_move(&block->link, &temp); 1226 1236 trim_list = &temp; 1227 - trim_size = drm_buddy_block_size(mm, block) - 1237 + trim_size = gpu_buddy_block_size(mm, block) - 1228 1238 (size - original_size); 1229 1239 } 1230 1240 1231 - drm_buddy_block_trim(mm, 1241 + gpu_buddy_block_trim(mm, 1232 1242 NULL, 1233 1243 trim_size, 1234 1244 trim_list); ··· 1241 1251 return 0; 1242 1252 1243 1253 err_free: 1244 - drm_buddy_free_list_internal(mm, &allocated); 1254 + gpu_buddy_free_list_internal(mm, &allocated); 1245 1255 return err; 1246 1256 } 1247 - EXPORT_SYMBOL(drm_buddy_alloc_blocks); 1257 + EXPORT_SYMBOL(gpu_buddy_alloc_blocks); 1248 1258 1249 1259 /** 1250 - * drm_buddy_block_print - print block information 1260 + * gpu_buddy_block_print - print block information 1251 1261 * 1252 - * @mm: DRM buddy manager 1253 - * @block: DRM buddy block 1254 - * @p: DRM printer to use 1262 + * @mm: GPU buddy manager 1263 + * @block: GPU buddy block 1255 1264 */ 1256 - void drm_buddy_block_print(struct drm_buddy *mm, 1257 - struct drm_buddy_block *block, 1258 - struct drm_printer *p) 1265 + void gpu_buddy_block_print(struct gpu_buddy *mm, 1266 + struct gpu_buddy_block *block) 1259 1267 { 1260 - u64 start = drm_buddy_block_offset(block); 1261 - u64 size = drm_buddy_block_size(mm, block); 1268 + u64 start = gpu_buddy_block_offset(block); 1269 + u64 size = gpu_buddy_block_size(mm, block); 1262 1270 1263 - drm_printf(p, "%#018llx-%#018llx: %llu\n", start, start + size, size); 1271 + pr_info("%#018llx-%#018llx: %llu\n", start, start + size, size); 1264 1272 } 1265 - EXPORT_SYMBOL(drm_buddy_block_print); 1273 + EXPORT_SYMBOL(gpu_buddy_block_print); 1266 1274 1267 1275 /** 1268 - * drm_buddy_print - print allocator state 1276 + * gpu_buddy_print - print allocator state 1269 1277 * 1270 - * @mm: DRM buddy manager 1271 - * @p: DRM printer to use 1278 + * @mm: GPU buddy manager 1279 + * @p: GPU printer to use 1272 1280 */ 1273 - void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p) 1281 + void gpu_buddy_print(struct gpu_buddy *mm) 1274 1282 { 1275 1283 int order; 1276 1284 1277 - drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB, clear_free: %lluMiB\n", 1278 - mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20, mm->clear_avail >> 20); 1285 + pr_info("chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB, clear_free: %lluMiB\n", 1286 + 
mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20, mm->clear_avail >> 20); 1279 1287 1280 1288 for (order = mm->max_order; order >= 0; order--) { 1281 - struct drm_buddy_block *block, *tmp; 1289 + struct gpu_buddy_block *block, *tmp; 1282 1290 struct rb_root *root; 1283 1291 u64 count = 0, free; 1284 1292 unsigned int tree; ··· 1285 1297 root = &mm->free_trees[tree][order]; 1286 1298 1287 1299 rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) { 1288 - BUG_ON(!drm_buddy_block_is_free(block)); 1300 + BUG_ON(!gpu_buddy_block_is_free(block)); 1289 1301 count++; 1290 1302 } 1291 1303 } 1292 1304 1293 - drm_printf(p, "order-%2d ", order); 1294 - 1295 1305 free = count * (mm->chunk_size << order); 1296 1306 if (free < SZ_1M) 1297 - drm_printf(p, "free: %8llu KiB", free >> 10); 1307 + pr_info("order-%2d free: %8llu KiB, blocks: %llu\n", 1308 + order, free >> 10, count); 1298 1309 else 1299 - drm_printf(p, "free: %8llu MiB", free >> 20); 1300 - 1301 - drm_printf(p, ", blocks: %llu\n", count); 1310 + pr_info("order-%2d free: %8llu MiB, blocks: %llu\n", 1311 + order, free >> 20, count); 1302 1312 } 1303 1313 } 1304 - EXPORT_SYMBOL(drm_buddy_print); 1314 + EXPORT_SYMBOL(gpu_buddy_print); 1305 1315 1306 - static void drm_buddy_module_exit(void) 1316 + static void gpu_buddy_module_exit(void) 1307 1317 { 1308 1318 kmem_cache_destroy(slab_blocks); 1309 1319 } 1310 1320 1311 - static int __init drm_buddy_module_init(void) 1321 + static int __init gpu_buddy_module_init(void) 1312 1322 { 1313 - slab_blocks = KMEM_CACHE(drm_buddy_block, 0); 1323 + slab_blocks = KMEM_CACHE(gpu_buddy_block, 0); 1314 1324 if (!slab_blocks) 1315 1325 return -ENOMEM; 1316 1326 1317 1327 return 0; 1318 1328 } 1319 1329 1320 - module_init(drm_buddy_module_init); 1321 - module_exit(drm_buddy_module_exit); 1330 + module_init(gpu_buddy_module_init); 1331 + module_exit(gpu_buddy_module_exit); 1322 1332 1323 - MODULE_DESCRIPTION("DRM Buddy Allocator"); 1333 + MODULE_DESCRIPTION("GPU Buddy Allocator"); 1324 1334 MODULE_LICENSE("Dual MIT/GPL");
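
A behavioral note on the hunk above: the core now logs through pr_info()
rather than a drm_printer, and the split clear/dirty free trees let callers
prefer already-zeroed memory. A hedged sketch of that usage — the flag names
come from the patch, while the surrounding driver code is illustrative:

	/* Illustrative: prefer blocks already known to be zeroed. */
	LIST_HEAD(blocks);
	int err;

	err = gpu_buddy_alloc_blocks(mm, 0, mm->size, SZ_2M, SZ_4K,
				     &blocks, GPU_BUDDY_CLEAR_ALLOCATION);
	if (!err) {
		/* ... use the memory, zeroing it before release ... */
		gpu_buddy_free_list(mm, &blocks, GPU_BUDDY_CLEARED);
	}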
+1
drivers/gpu/drm/Kconfig
··· 220 220 config DRM_BUDDY 221 221 tristate 222 222 depends on DRM 223 + select GPU_BUDDY 223 224 help 224 225 A page based buddy allocator 225 226
+1 -1
drivers/gpu/drm/Makefile
··· 114 114 115 115 obj-$(CONFIG_DRM_GPUSVM) += drm_gpusvm_helper.o 116 116 117 - obj-$(CONFIG_DRM_BUDDY) += ../buddy.o 117 + obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o 118 118 119 119 drm_dma_helper-y := drm_gem_dma_helper.o 120 120 drm_dma_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fbdev_dma.o
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
··· 5663 5663 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 5664 5664 struct amdgpu_vram_mgr_resource *vres; 5665 5665 struct ras_critical_region *region; 5666 - struct drm_buddy_block *block; 5666 + struct gpu_buddy_block *block; 5667 5667 int ret = 0; 5668 5668 5669 5669 if (!bo || !bo->tbo.resource)
+6 -6
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
··· 55 55 uint64_t start, uint64_t size, 56 56 struct amdgpu_res_cursor *cur) 57 57 { 58 - struct drm_buddy_block *block; 58 + struct gpu_buddy_block *block; 59 59 struct list_head *head, *next; 60 60 struct drm_mm_node *node; 61 61 ··· 71 71 head = &to_amdgpu_vram_mgr_resource(res)->blocks; 72 72 73 73 block = list_first_entry_or_null(head, 74 - struct drm_buddy_block, 74 + struct gpu_buddy_block, 75 75 link); 76 76 if (!block) 77 77 goto fallback; ··· 81 81 82 82 next = block->link.next; 83 83 if (next != head) 84 - block = list_entry(next, struct drm_buddy_block, link); 84 + block = list_entry(next, struct gpu_buddy_block, link); 85 85 } 86 86 87 87 cur->start = amdgpu_vram_mgr_block_start(block) + start; ··· 125 125 */ 126 126 static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size) 127 127 { 128 - struct drm_buddy_block *block; 128 + struct gpu_buddy_block *block; 129 129 struct drm_mm_node *node; 130 130 struct list_head *next; 131 131 ··· 146 146 block = cur->node; 147 147 148 148 next = block->link.next; 149 - block = list_entry(next, struct drm_buddy_block, link); 149 + block = list_entry(next, struct gpu_buddy_block, link); 150 150 151 151 cur->node = block; 152 152 cur->start = amdgpu_vram_mgr_block_start(block); ··· 175 175 */ 176 176 static inline bool amdgpu_res_cleared(struct amdgpu_res_cursor *cur) 177 177 { 178 - struct drm_buddy_block *block; 178 + struct gpu_buddy_block *block; 179 179 180 180 switch (cur->mem_type) { 181 181 case TTM_PL_VRAM:
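
The cursor helpers above now walk gpu_buddy_block lists internally, but the
calling pattern is unchanged. A minimal sketch of iterating a VRAM resource
with them, assuming a struct ttm_resource *res is in scope (the per-span
work is elided):

	/* Illustrative: visit each contiguous VRAM span of a resource. */
	struct amdgpu_res_cursor cursor;

	amdgpu_res_first(res, 0, res->size, &cursor);
	while (cursor.remaining) {
		/* cursor.start and cursor.size describe this span. */
		amdgpu_res_next(&cursor, cursor.size);
	}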
+40 -39
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
··· 25 25 #include <linux/dma-mapping.h> 26 26 #include <drm/ttm/ttm_range_manager.h> 27 27 #include <drm/drm_drv.h> 28 + #include <drm/drm_buddy.h> 28 29 29 30 #include "amdgpu.h" 30 31 #include "amdgpu_vm.h" ··· 53 52 return container_of(mgr, struct amdgpu_device, mman.vram_mgr); 54 53 } 55 54 56 - static inline struct drm_buddy_block * 55 + static inline struct gpu_buddy_block * 57 56 amdgpu_vram_mgr_first_block(struct list_head *list) 58 57 { 59 - return list_first_entry_or_null(list, struct drm_buddy_block, link); 58 + return list_first_entry_or_null(list, struct gpu_buddy_block, link); 60 59 } 61 60 62 61 static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head) 63 62 { 64 - struct drm_buddy_block *block; 63 + struct gpu_buddy_block *block; 65 64 u64 start, size; 66 65 67 66 block = amdgpu_vram_mgr_first_block(head); ··· 72 71 start = amdgpu_vram_mgr_block_start(block); 73 72 size = amdgpu_vram_mgr_block_size(block); 74 73 75 - block = list_entry(block->link.next, struct drm_buddy_block, link); 74 + block = list_entry(block->link.next, struct gpu_buddy_block, link); 76 75 if (start + size != amdgpu_vram_mgr_block_start(block)) 77 76 return false; 78 77 } ··· 82 81 83 82 static inline u64 amdgpu_vram_mgr_blocks_size(struct list_head *head) 84 83 { 85 - struct drm_buddy_block *block; 84 + struct gpu_buddy_block *block; 86 85 u64 size = 0; 87 86 88 87 list_for_each_entry(block, head, link) ··· 255 254 * Calculate how many bytes of the DRM BUDDY block are inside visible VRAM 256 255 */ 257 256 static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev, 258 - struct drm_buddy_block *block) 257 + struct gpu_buddy_block *block) 259 258 { 260 259 u64 start = amdgpu_vram_mgr_block_start(block); 261 260 u64 end = start + amdgpu_vram_mgr_block_size(block); ··· 280 279 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 281 280 struct ttm_resource *res = bo->tbo.resource; 282 281 struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res); 283 - struct drm_buddy_block *block; 282 + struct gpu_buddy_block *block; 284 283 u64 usage = 0; 285 284 286 285 if (amdgpu_gmc_vram_full_visible(&adev->gmc)) ··· 300 299 { 301 300 struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); 302 301 struct amdgpu_device *adev = to_amdgpu_device(mgr); 303 - struct drm_buddy *mm = &mgr->mm; 302 + struct gpu_buddy *mm = &mgr->mm; 304 303 struct amdgpu_vram_reservation *rsv, *temp; 305 - struct drm_buddy_block *block; 304 + struct gpu_buddy_block *block; 306 305 uint64_t vis_usage; 307 306 308 307 list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks) { 309 - if (drm_buddy_alloc_blocks(mm, rsv->start, rsv->start + rsv->size, 308 + if (gpu_buddy_alloc_blocks(mm, rsv->start, rsv->start + rsv->size, 310 309 rsv->size, mm->chunk_size, &rsv->allocated, 311 - DRM_BUDDY_RANGE_ALLOCATION)) 310 + GPU_BUDDY_RANGE_ALLOCATION)) 312 311 continue; 313 312 314 313 block = amdgpu_vram_mgr_first_block(&rsv->allocated); ··· 404 403 uint64_t address, struct amdgpu_vram_block_info *info) 405 404 { 406 405 struct amdgpu_vram_mgr_resource *vres; 407 - struct drm_buddy_block *block; 406 + struct gpu_buddy_block *block; 408 407 u64 start, size; 409 408 int ret = -ENOENT; 410 409 ··· 451 450 struct amdgpu_vram_mgr_resource *vres; 452 451 u64 size, remaining_size, lpfn, fpfn; 453 452 unsigned int adjust_dcc_size = 0; 454 - struct drm_buddy *mm = &mgr->mm; 455 - struct drm_buddy_block *block; 453 + struct gpu_buddy *mm = &mgr->mm; 454 + struct gpu_buddy_block *block; 456 455 unsigned long 
pages_per_block; 457 456 int r; 458 457 ··· 494 493 INIT_LIST_HEAD(&vres->blocks); 495 494 496 495 if (place->flags & TTM_PL_FLAG_TOPDOWN) 497 - vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION; 496 + vres->flags |= GPU_BUDDY_TOPDOWN_ALLOCATION; 498 497 499 498 if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) 500 - vres->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION; 499 + vres->flags |= GPU_BUDDY_CONTIGUOUS_ALLOCATION; 501 500 502 501 if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED) 503 - vres->flags |= DRM_BUDDY_CLEAR_ALLOCATION; 502 + vres->flags |= GPU_BUDDY_CLEAR_ALLOCATION; 504 503 505 504 if (fpfn || lpfn != mgr->mm.size) 506 505 /* Allocate blocks in desired range */ 507 - vres->flags |= DRM_BUDDY_RANGE_ALLOCATION; 506 + vres->flags |= GPU_BUDDY_RANGE_ALLOCATION; 508 507 509 508 if (bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC && 510 509 adev->gmc.gmc_funcs->get_dcc_alignment) ··· 517 516 dcc_size = roundup_pow_of_two(vres->base.size + adjust_dcc_size); 518 517 remaining_size = (u64)dcc_size; 519 518 520 - vres->flags |= DRM_BUDDY_TRIM_DISABLE; 519 + vres->flags |= GPU_BUDDY_TRIM_DISABLE; 521 520 } 522 521 523 522 mutex_lock(&mgr->lock); ··· 537 536 538 537 BUG_ON(min_block_size < mm->chunk_size); 539 538 540 - r = drm_buddy_alloc_blocks(mm, fpfn, 539 + r = gpu_buddy_alloc_blocks(mm, fpfn, 541 540 lpfn, 542 541 size, 543 542 min_block_size, ··· 546 545 547 546 if (unlikely(r == -ENOSPC) && pages_per_block == ~0ul && 548 547 !(place->flags & TTM_PL_FLAG_CONTIGUOUS)) { 549 - vres->flags &= ~DRM_BUDDY_CONTIGUOUS_ALLOCATION; 548 + vres->flags &= ~GPU_BUDDY_CONTIGUOUS_ALLOCATION; 550 549 pages_per_block = max_t(u32, 2UL << (20UL - PAGE_SHIFT), 551 550 tbo->page_alignment); 552 551 ··· 567 566 list_add_tail(&vres->vres_node, &mgr->allocated_vres_list); 568 567 569 568 if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) { 570 - struct drm_buddy_block *dcc_block; 569 + struct gpu_buddy_block *dcc_block; 571 570 unsigned long dcc_start; 572 571 u64 trim_start; 573 572 ··· 577 576 roundup((unsigned long)amdgpu_vram_mgr_block_start(dcc_block), 578 577 adjust_dcc_size); 579 578 trim_start = (u64)dcc_start; 580 - drm_buddy_block_trim(mm, &trim_start, 579 + gpu_buddy_block_trim(mm, &trim_start, 581 580 (u64)vres->base.size, 582 581 &vres->blocks); 583 582 } ··· 615 614 return 0; 616 615 617 616 error_free_blocks: 618 - drm_buddy_free_list(mm, &vres->blocks, 0); 617 + gpu_buddy_free_list(mm, &vres->blocks, 0); 619 618 mutex_unlock(&mgr->lock); 620 619 error_fini: 621 620 ttm_resource_fini(man, &vres->base); ··· 638 637 struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res); 639 638 struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); 640 639 struct amdgpu_device *adev = to_amdgpu_device(mgr); 641 - struct drm_buddy *mm = &mgr->mm; 642 - struct drm_buddy_block *block; 640 + struct gpu_buddy *mm = &mgr->mm; 641 + struct gpu_buddy_block *block; 643 642 uint64_t vis_usage = 0; 644 643 645 644 mutex_lock(&mgr->lock); ··· 650 649 list_for_each_entry(block, &vres->blocks, link) 651 650 vis_usage += amdgpu_vram_mgr_vis_size(adev, block); 652 651 653 - drm_buddy_free_list(mm, &vres->blocks, vres->flags); 652 + gpu_buddy_free_list(mm, &vres->blocks, vres->flags); 654 653 amdgpu_vram_mgr_do_reserve(man); 655 654 mutex_unlock(&mgr->lock); 656 655 ··· 689 688 if (!*sgt) 690 689 return -ENOMEM; 691 690 692 - /* Determine the number of DRM_BUDDY blocks to export */ 691 + /* Determine the number of GPU_BUDDY blocks to export */ 693 692 amdgpu_res_first(res, offset, length, &cursor); 694 693 
while (cursor.remaining) { 695 694 num_entries++; ··· 705 704 sg->length = 0; 706 705 707 706 /* 708 - * Walk down DRM_BUDDY blocks to populate scatterlist nodes 709 - * @note: Use iterator api to get first the DRM_BUDDY block 707 + * Walk down GPU_BUDDY blocks to populate scatterlist nodes 708 + * @note: Use iterator api to get first the GPU_BUDDY block 710 709 * and the number of bytes from it. Access the following 711 - * DRM_BUDDY block(s) if more buffer needs to exported 710 + * GPU_BUDDY block(s) if more buffer needs to exported 712 711 */ 713 712 amdgpu_res_first(res, offset, length, &cursor); 714 713 for_each_sgtable_sg((*sgt), sg, i) { ··· 793 792 void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev) 794 793 { 795 794 struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr; 796 - struct drm_buddy *mm = &mgr->mm; 795 + struct gpu_buddy *mm = &mgr->mm; 797 796 798 797 mutex_lock(&mgr->lock); 799 - drm_buddy_reset_clear(mm, false); 798 + gpu_buddy_reset_clear(mm, false); 800 799 mutex_unlock(&mgr->lock); 801 800 } 802 801 ··· 816 815 size_t size) 817 816 { 818 817 struct amdgpu_vram_mgr_resource *mgr = to_amdgpu_vram_mgr_resource(res); 819 - struct drm_buddy_block *block; 818 + struct gpu_buddy_block *block; 820 819 821 820 /* Check each drm buddy block individually */ 822 821 list_for_each_entry(block, &mgr->blocks, link) { ··· 849 848 size_t size) 850 849 { 851 850 struct amdgpu_vram_mgr_resource *mgr = to_amdgpu_vram_mgr_resource(res); 852 - struct drm_buddy_block *block; 851 + struct gpu_buddy_block *block; 853 852 854 853 /* Check each drm buddy block individually */ 855 854 list_for_each_entry(block, &mgr->blocks, link) { ··· 878 877 struct drm_printer *printer) 879 878 { 880 879 struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); 881 - struct drm_buddy *mm = &mgr->mm; 880 + struct gpu_buddy *mm = &mgr->mm; 882 881 struct amdgpu_vram_reservation *rsv; 883 882 884 883 drm_printf(printer, " vis usage:%llu\n", ··· 931 930 mgr->default_page_size = PAGE_SIZE; 932 931 933 932 man->func = &amdgpu_vram_mgr_func; 934 - err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE); 933 + err = gpu_buddy_init(&mgr->mm, man->size, PAGE_SIZE); 935 934 if (err) 936 935 return err; 937 936 ··· 966 965 kfree(rsv); 967 966 968 967 list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) { 969 - drm_buddy_free_list(&mgr->mm, &rsv->allocated, 0); 968 + gpu_buddy_free_list(&mgr->mm, &rsv->allocated, 0); 970 969 kfree(rsv); 971 970 } 972 971 if (!adev->gmc.is_app_apu) 973 - drm_buddy_fini(&mgr->mm); 972 + gpu_buddy_fini(&mgr->mm); 974 973 mutex_unlock(&mgr->lock); 975 974 976 975 ttm_resource_manager_cleanup(man);
+9 -9
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
··· 28 28 29 29 struct amdgpu_vram_mgr { 30 30 struct ttm_resource_manager manager; 31 - struct drm_buddy mm; 31 + struct gpu_buddy mm; 32 32 /* protects access to buffer objects */ 33 33 struct mutex lock; 34 34 struct list_head reservations_pending; ··· 57 57 struct amdgpu_vres_task task; 58 58 }; 59 59 60 - static inline u64 amdgpu_vram_mgr_block_start(struct drm_buddy_block *block) 60 + static inline u64 amdgpu_vram_mgr_block_start(struct gpu_buddy_block *block) 61 61 { 62 - return drm_buddy_block_offset(block); 62 + return gpu_buddy_block_offset(block); 63 63 } 64 64 65 - static inline u64 amdgpu_vram_mgr_block_size(struct drm_buddy_block *block) 65 + static inline u64 amdgpu_vram_mgr_block_size(struct gpu_buddy_block *block) 66 66 { 67 - return (u64)PAGE_SIZE << drm_buddy_block_order(block); 67 + return (u64)PAGE_SIZE << gpu_buddy_block_order(block); 68 68 } 69 69 70 - static inline bool amdgpu_vram_mgr_is_cleared(struct drm_buddy_block *block) 70 + static inline bool amdgpu_vram_mgr_is_cleared(struct gpu_buddy_block *block) 71 71 { 72 - return drm_buddy_block_is_clear(block); 72 + return gpu_buddy_block_is_clear(block); 73 73 } 74 74 75 75 static inline struct amdgpu_vram_mgr_resource * ··· 82 82 { 83 83 struct amdgpu_vram_mgr_resource *ares = to_amdgpu_vram_mgr_resource(res); 84 84 85 - WARN_ON(ares->flags & DRM_BUDDY_CLEARED); 86 - ares->flags |= DRM_BUDDY_CLEARED; 85 + WARN_ON(ares->flags & GPU_BUDDY_CLEARED); 86 + ares->flags |= GPU_BUDDY_CLEARED; 87 87 } 88 88 89 89 int amdgpu_vram_mgr_query_address_block_info(struct amdgpu_vram_mgr *mgr,
+77
drivers/gpu/drm/drm_buddy.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright © 2021 Intel Corporation 4 + */ 5 + 6 + #include <kunit/test-bug.h> 7 + 8 + #include <linux/export.h> 9 + #include <linux/kmemleak.h> 10 + #include <linux/module.h> 11 + #include <linux/sizes.h> 12 + 13 + #include <linux/gpu_buddy.h> 14 + #include <drm/drm_buddy.h> 15 + #include <drm/drm_print.h> 16 + 17 + /** 18 + * drm_buddy_block_print - print block information 19 + * 20 + * @mm: DRM buddy manager 21 + * @block: DRM buddy block 22 + * @p: DRM printer to use 23 + */ 24 + void drm_buddy_block_print(struct gpu_buddy *mm, 25 + struct gpu_buddy_block *block, 26 + struct drm_printer *p) 27 + { 28 + u64 start = gpu_buddy_block_offset(block); 29 + u64 size = gpu_buddy_block_size(mm, block); 30 + 31 + drm_printf(p, "%#018llx-%#018llx: %llu\n", start, start + size, size); 32 + } 33 + EXPORT_SYMBOL(drm_buddy_block_print); 34 + 35 + /** 36 + * drm_buddy_print - print allocator state 37 + * 38 + * @mm: DRM buddy manager 39 + * @p: DRM printer to use 40 + */ 41 + void drm_buddy_print(struct gpu_buddy *mm, struct drm_printer *p) 42 + { 43 + int order; 44 + 45 + drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB, clear_free: %lluMiB\n", 46 + mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20, mm->clear_avail >> 20); 47 + 48 + for (order = mm->max_order; order >= 0; order--) { 49 + struct gpu_buddy_block *block, *tmp; 50 + struct rb_root *root; 51 + u64 count = 0, free; 52 + unsigned int tree; 53 + 54 + for_each_free_tree(tree) { 55 + root = &mm->free_trees[tree][order]; 56 + 57 + rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) { 58 + BUG_ON(!gpu_buddy_block_is_free(block)); 59 + count++; 60 + } 61 + } 62 + 63 + drm_printf(p, "order-%2d ", order); 64 + 65 + free = count * (mm->chunk_size << order); 66 + if (free < SZ_1M) 67 + drm_printf(p, "free: %8llu KiB", free >> 10); 68 + else 69 + drm_printf(p, "free: %8llu MiB", free >> 20); 70 + 71 + drm_printf(p, ", blocks: %llu\n", count); 72 + } 73 + } 74 + EXPORT_SYMBOL(drm_buddy_print); 75 + 76 + MODULE_DESCRIPTION("DRM-specific GPU Buddy Allocator Print Helpers"); 77 + MODULE_LICENSE("Dual MIT/GPL");
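With this split, the allocator core no longer links against DRM; only these print helpers do. As a rough illustration of how a DRM driver could consume both halves, here is a minimal sketch — the function, its name and the drm_device argument are hypothetical, while gpu_buddy and drm_buddy_print() are as introduced by this commit:

#include <drm/drm_buddy.h>
#include <drm/drm_device.h>
#include <drm/drm_print.h>
#include <linux/gpu_buddy.h>

/* Hypothetical debug hook: dump the per-order free-block state of a
 * gpu_buddy that was set up elsewhere with gpu_buddy_init(). */
static void example_dump_buddy_state(struct drm_device *drm,
				     struct gpu_buddy *mm)
{
	struct drm_printer p = drm_info_printer(drm->dev);

	drm_buddy_print(mm, &p);
}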
+4 -4
drivers/gpu/drm/i915/i915_scatterlist.c
··· 167 167 struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res); 168 168 const u64 size = res->size; 169 169 const u32 max_segment = round_down(UINT_MAX, page_alignment); 170 - struct drm_buddy *mm = bman_res->mm; 170 + struct gpu_buddy *mm = bman_res->mm; 171 171 struct list_head *blocks = &bman_res->blocks; 172 - struct drm_buddy_block *block; 172 + struct gpu_buddy_block *block; 173 173 struct i915_refct_sgt *rsgt; 174 174 struct scatterlist *sg; 175 175 struct sg_table *st; ··· 202 202 list_for_each_entry(block, blocks, link) { 203 203 u64 block_size, offset; 204 204 205 - block_size = min_t(u64, size, drm_buddy_block_size(mm, block)); 206 - offset = drm_buddy_block_offset(block); 205 + block_size = min_t(u64, size, gpu_buddy_block_size(mm, block)); 206 + offset = gpu_buddy_block_offset(block); 207 207 208 208 while (block_size) { 209 209 u64 len;
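The scatterlist builder above also shows the renamed accessor pattern: an allocation comes back as a list of gpu_buddy_block, and each block's byte range is derived through the accessors. The same walk in isolation, as a sketch (example_total_size() is a hypothetical name):

#include <linux/gpu_buddy.h>
#include <linux/list.h>
#include <linux/printk.h>

/* Sum an allocation's size from its block list, deriving each block's
 * placement with the gpu_buddy_block_offset()/_size() accessors. */
static u64 example_total_size(struct gpu_buddy *mm, struct list_head *blocks)
{
	struct gpu_buddy_block *block;
	u64 total = 0;

	list_for_each_entry(block, blocks, link) {
		pr_debug("block %#llx+%llu\n",
			 gpu_buddy_block_offset(block),
			 gpu_buddy_block_size(mm, block));
		total += gpu_buddy_block_size(mm, block);
	}

	return total;
}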
+28 -27
drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
··· 6 6 #include <linux/slab.h> 7 7 8 8 #include <linux/gpu_buddy.h> 9 + #include <drm/drm_buddy.h> 9 10 #include <drm/drm_print.h> 10 11 #include <drm/ttm/ttm_placement.h> 11 12 #include <drm/ttm/ttm_bo.h> ··· 17 16 18 17 struct i915_ttm_buddy_manager { 19 18 struct ttm_resource_manager manager; 20 - struct drm_buddy mm; 19 + struct gpu_buddy mm; 21 20 struct list_head reserved; 22 21 struct mutex lock; 23 22 unsigned long visible_size; ··· 39 38 { 40 39 struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); 41 40 struct i915_ttm_buddy_resource *bman_res; 42 - struct drm_buddy *mm = &bman->mm; 41 + struct gpu_buddy *mm = &bman->mm; 43 42 unsigned long n_pages, lpfn; 44 43 u64 min_page_size; 45 44 u64 size; ··· 58 57 bman_res->mm = mm; 59 58 60 59 if (place->flags & TTM_PL_FLAG_TOPDOWN) 61 - bman_res->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION; 60 + bman_res->flags |= GPU_BUDDY_TOPDOWN_ALLOCATION; 62 61 63 62 if (place->flags & TTM_PL_FLAG_CONTIGUOUS) 64 - bman_res->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION; 63 + bman_res->flags |= GPU_BUDDY_CONTIGUOUS_ALLOCATION; 65 64 66 65 if (place->fpfn || lpfn != man->size) 67 - bman_res->flags |= DRM_BUDDY_RANGE_ALLOCATION; 66 + bman_res->flags |= GPU_BUDDY_RANGE_ALLOCATION; 68 67 69 68 GEM_BUG_ON(!bman_res->base.size); 70 69 size = bman_res->base.size; ··· 90 89 goto err_free_res; 91 90 } 92 91 93 - err = drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT, 92 + err = gpu_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT, 94 93 (u64)lpfn << PAGE_SHIFT, 95 94 (u64)n_pages << PAGE_SHIFT, 96 95 min_page_size, ··· 102 101 if (lpfn <= bman->visible_size) { 103 102 bman_res->used_visible_size = PFN_UP(bman_res->base.size); 104 103 } else { 105 - struct drm_buddy_block *block; 104 + struct gpu_buddy_block *block; 106 105 107 106 list_for_each_entry(block, &bman_res->blocks, link) { 108 107 unsigned long start = 109 - drm_buddy_block_offset(block) >> PAGE_SHIFT; 108 + gpu_buddy_block_offset(block) >> PAGE_SHIFT; 110 109 111 110 if (start < bman->visible_size) { 112 111 unsigned long end = start + 113 - (drm_buddy_block_size(mm, block) >> PAGE_SHIFT); 112 + (gpu_buddy_block_size(mm, block) >> PAGE_SHIFT); 114 113 115 114 bman_res->used_visible_size += 116 115 min(end, bman->visible_size) - start; ··· 127 126 return 0; 128 127 129 128 err_free_blocks: 130 - drm_buddy_free_list(mm, &bman_res->blocks, 0); 129 + gpu_buddy_free_list(mm, &bman_res->blocks, 0); 131 130 mutex_unlock(&bman->lock); 132 131 err_free_res: 133 132 ttm_resource_fini(man, &bman_res->base); ··· 142 141 struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); 143 142 144 143 mutex_lock(&bman->lock); 145 - drm_buddy_free_list(&bman->mm, &bman_res->blocks, 0); 144 + gpu_buddy_free_list(&bman->mm, &bman_res->blocks, 0); 146 145 bman->visible_avail += bman_res->used_visible_size; 147 146 mutex_unlock(&bman->lock); 148 147 ··· 157 156 { 158 157 struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res); 159 158 struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); 160 - struct drm_buddy *mm = &bman->mm; 161 - struct drm_buddy_block *block; 159 + struct gpu_buddy *mm = &bman->mm; 160 + struct gpu_buddy_block *block; 162 161 163 162 if (!place->fpfn && !place->lpfn) 164 163 return true; ··· 177 176 /* Check each drm buddy block individually */ 178 177 list_for_each_entry(block, &bman_res->blocks, link) { 179 178 unsigned long fpfn = 180 - drm_buddy_block_offset(block) >> PAGE_SHIFT; 179 + gpu_buddy_block_offset(block) >> PAGE_SHIFT; 181 180 unsigned long lpfn = fpfn 
+ 182 - (drm_buddy_block_size(mm, block) >> PAGE_SHIFT); 181 + (gpu_buddy_block_size(mm, block) >> PAGE_SHIFT); 183 182 184 183 if (place->fpfn < lpfn && place->lpfn > fpfn) 185 184 return true; ··· 195 194 { 196 195 struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res); 197 196 struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); 198 - struct drm_buddy *mm = &bman->mm; 199 - struct drm_buddy_block *block; 197 + struct gpu_buddy *mm = &bman->mm; 198 + struct gpu_buddy_block *block; 200 199 201 200 if (!place->fpfn && !place->lpfn) 202 201 return true; ··· 210 209 /* Check each drm buddy block individually */ 211 210 list_for_each_entry(block, &bman_res->blocks, link) { 212 211 unsigned long fpfn = 213 - drm_buddy_block_offset(block) >> PAGE_SHIFT; 212 + gpu_buddy_block_offset(block) >> PAGE_SHIFT; 214 213 unsigned long lpfn = fpfn + 215 - (drm_buddy_block_size(mm, block) >> PAGE_SHIFT); 214 + (gpu_buddy_block_size(mm, block) >> PAGE_SHIFT); 216 215 217 216 if (fpfn < place->fpfn || lpfn > place->lpfn) 218 217 return false; ··· 225 224 struct drm_printer *printer) 226 225 { 227 226 struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); 228 - struct drm_buddy_block *block; 227 + struct gpu_buddy_block *block; 229 228 230 229 mutex_lock(&bman->lock); 231 230 drm_printf(printer, "default_page_size: %lluKiB\n", ··· 294 293 if (!bman) 295 294 return -ENOMEM; 296 295 297 - err = drm_buddy_init(&bman->mm, size, chunk_size); 296 + err = gpu_buddy_init(&bman->mm, size, chunk_size); 298 297 if (err) 299 298 goto err_free_bman; 300 299 ··· 334 333 { 335 334 struct ttm_resource_manager *man = ttm_manager_type(bdev, type); 336 335 struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); 337 - struct drm_buddy *mm = &bman->mm; 336 + struct gpu_buddy *mm = &bman->mm; 338 337 int ret; 339 338 340 339 ttm_resource_manager_set_used(man, false); ··· 346 345 ttm_set_driver_manager(bdev, type, NULL); 347 346 348 347 mutex_lock(&bman->lock); 349 - drm_buddy_free_list(mm, &bman->reserved, 0); 350 - drm_buddy_fini(mm); 348 + gpu_buddy_free_list(mm, &bman->reserved, 0); 349 + gpu_buddy_fini(mm); 351 350 bman->visible_avail += bman->visible_reserved; 352 351 WARN_ON_ONCE(bman->visible_avail != bman->visible_size); 353 352 mutex_unlock(&bman->lock); ··· 372 371 u64 start, u64 size) 373 372 { 374 373 struct i915_ttm_buddy_manager *bman = to_buddy_manager(man); 375 - struct drm_buddy *mm = &bman->mm; 374 + struct gpu_buddy *mm = &bman->mm; 376 375 unsigned long fpfn = start >> PAGE_SHIFT; 377 376 unsigned long flags = 0; 378 377 int ret; 379 378 380 - flags |= DRM_BUDDY_RANGE_ALLOCATION; 379 + flags |= GPU_BUDDY_RANGE_ALLOCATION; 381 380 382 381 mutex_lock(&bman->lock); 383 - ret = drm_buddy_alloc_blocks(mm, start, 382 + ret = gpu_buddy_alloc_blocks(mm, start, 384 383 start + size, 385 384 size, mm->chunk_size, 386 385 &bman->reserved,
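i915_ttm_buddy_man_reserve() above is the canonical range-restricted call: GPU_BUDDY_RANGE_ALLOCATION limits the search to [start, start + size). The same pattern in isolation, as a sketch (example_reserve_range() is a hypothetical name):

#include <linux/gpu_buddy.h>

/* Carve a fixed range (e.g. firmware-reserved memory) out of the
 * allocator; the blocks land on 'reserved' and stay unavailable
 * until handed back via gpu_buddy_free_list(). */
static int example_reserve_range(struct gpu_buddy *mm, u64 start, u64 size,
				 struct list_head *reserved)
{
	return gpu_buddy_alloc_blocks(mm, start, start + size, size,
				      mm->chunk_size, reserved,
				      GPU_BUDDY_RANGE_ALLOCATION);
}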
+2 -2
drivers/gpu/drm/i915/i915_ttm_buddy_manager.h
··· 13 13 14 14 struct ttm_device; 15 15 struct ttm_resource_manager; 16 - struct drm_buddy; 16 + struct gpu_buddy; 17 17 18 18 /** 19 19 * struct i915_ttm_buddy_resource ··· 33 33 struct list_head blocks; 34 34 unsigned long flags; 35 35 unsigned long used_visible_size; 36 - struct drm_buddy *mm; 36 + struct gpu_buddy *mm; 37 37 }; 38 38 39 39 /**
+10 -10
drivers/gpu/drm/i915/selftests/intel_memory_region.c
··· 6 6 #include <linux/prime_numbers.h> 7 7 #include <linux/sort.h> 8 8 9 - #include <drm/drm_buddy.h> 9 + #include <linux/gpu_buddy.h> 10 10 11 11 #include "../i915_selftest.h" 12 12 ··· 371 371 struct drm_i915_private *i915 = mem->i915; 372 372 struct i915_ttm_buddy_resource *res; 373 373 struct drm_i915_gem_object *obj; 374 - struct drm_buddy *mm; 374 + struct gpu_buddy *mm; 375 375 unsigned int expected_order; 376 376 LIST_HEAD(objects); 377 377 u64 size; ··· 447 447 struct drm_i915_private *i915 = mem->i915; 448 448 struct i915_ttm_buddy_resource *res; 449 449 struct drm_i915_gem_object *obj; 450 - struct drm_buddy_block *block; 451 - struct drm_buddy *mm; 450 + struct gpu_buddy_block *block; 451 + struct gpu_buddy *mm; 452 452 struct list_head *blocks; 453 453 struct scatterlist *sg; 454 454 I915_RND_STATE(prng); ··· 487 487 mm = res->mm; 488 488 size = 0; 489 489 list_for_each_entry(block, blocks, link) { 490 - if (drm_buddy_block_size(mm, block) > size) 491 - size = drm_buddy_block_size(mm, block); 490 + if (gpu_buddy_block_size(mm, block) > size) 491 + size = gpu_buddy_block_size(mm, block); 492 492 } 493 493 if (size < max_segment) { 494 494 pr_err("%s: Failed to create a huge contiguous block [> %u], largest block %lld\n", ··· 527 527 struct intel_memory_region *mr = obj->mm.region; 528 528 struct i915_ttm_buddy_resource *bman_res = 529 529 to_ttm_buddy_resource(obj->mm.res); 530 - struct drm_buddy *mm = bman_res->mm; 531 - struct drm_buddy_block *block; 530 + struct gpu_buddy *mm = bman_res->mm; 531 + struct gpu_buddy_block *block; 532 532 u64 total; 533 533 534 534 total = 0; 535 535 list_for_each_entry(block, &bman_res->blocks, link) { 536 - u64 start = drm_buddy_block_offset(block); 537 - u64 end = start + drm_buddy_block_size(mm, block); 536 + u64 start = gpu_buddy_block_offset(block); 537 + u64 end = start + gpu_buddy_block_size(mm, block); 538 538 539 539 if (start < resource_size(&mr->io)) 540 540 total += min_t(u64, end, resource_size(&mr->io)) - start;
+2 -2
drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
··· 251 251 NULL, &dummy_ttm_bo_destroy); 252 252 KUNIT_EXPECT_EQ(test, err, 0); 253 253 254 - snd_place = ttm_place_kunit_init(test, snd_mem, DRM_BUDDY_TOPDOWN_ALLOCATION); 254 + snd_place = ttm_place_kunit_init(test, snd_mem, GPU_BUDDY_TOPDOWN_ALLOCATION); 255 255 snd_placement = ttm_placement_kunit_init(test, snd_place, 1); 256 256 257 257 err = ttm_bo_validate(bo, snd_placement, &ctx_val); ··· 263 263 KUNIT_EXPECT_TRUE(test, ttm_tt_is_populated(bo->ttm)); 264 264 KUNIT_EXPECT_EQ(test, bo->resource->mem_type, snd_mem); 265 265 KUNIT_EXPECT_EQ(test, bo->resource->placement, 266 - DRM_BUDDY_TOPDOWN_ALLOCATION); 266 + GPU_BUDDY_TOPDOWN_ALLOCATION); 267 267 268 268 ttm_bo_fini(bo); 269 269 ttm_mock_manager_fini(priv->ttm_dev, snd_mem);
+9 -9
drivers/gpu/drm/ttm/tests/ttm_mock_manager.c
··· 31 31 { 32 32 struct ttm_mock_manager *manager = to_mock_mgr(man); 33 33 struct ttm_mock_resource *mock_res; 34 - struct drm_buddy *mm = &manager->mm; 34 + struct gpu_buddy *mm = &manager->mm; 35 35 u64 lpfn, fpfn, alloc_size; 36 36 int err; 37 37 ··· 47 47 INIT_LIST_HEAD(&mock_res->blocks); 48 48 49 49 if (place->flags & TTM_PL_FLAG_TOPDOWN) 50 - mock_res->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION; 50 + mock_res->flags |= GPU_BUDDY_TOPDOWN_ALLOCATION; 51 51 52 52 if (place->flags & TTM_PL_FLAG_CONTIGUOUS) 53 - mock_res->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION; 53 + mock_res->flags |= GPU_BUDDY_CONTIGUOUS_ALLOCATION; 54 54 55 55 alloc_size = (uint64_t)mock_res->base.size; 56 56 mutex_lock(&manager->lock); 57 - err = drm_buddy_alloc_blocks(mm, fpfn, lpfn, alloc_size, 57 + err = gpu_buddy_alloc_blocks(mm, fpfn, lpfn, alloc_size, 58 58 manager->default_page_size, 59 59 &mock_res->blocks, 60 60 mock_res->flags); ··· 67 67 return 0; 68 68 69 69 error_free_blocks: 70 - drm_buddy_free_list(mm, &mock_res->blocks, 0); 70 + gpu_buddy_free_list(mm, &mock_res->blocks, 0); 71 71 ttm_resource_fini(man, &mock_res->base); 72 72 mutex_unlock(&manager->lock); 73 73 ··· 79 79 { 80 80 struct ttm_mock_manager *manager = to_mock_mgr(man); 81 81 struct ttm_mock_resource *mock_res = to_mock_mgr_resource(res); 82 - struct drm_buddy *mm = &manager->mm; 82 + struct gpu_buddy *mm = &manager->mm; 83 83 84 84 mutex_lock(&manager->lock); 85 - drm_buddy_free_list(mm, &mock_res->blocks, 0); 85 + gpu_buddy_free_list(mm, &mock_res->blocks, 0); 86 86 mutex_unlock(&manager->lock); 87 87 88 88 ttm_resource_fini(man, res); ··· 106 106 107 107 mutex_init(&manager->lock); 108 108 109 - err = drm_buddy_init(&manager->mm, size, PAGE_SIZE); 109 + err = gpu_buddy_init(&manager->mm, size, PAGE_SIZE); 110 110 111 111 if (err) { 112 112 kfree(manager); ··· 142 142 ttm_resource_manager_set_used(man, false); 143 143 144 144 mutex_lock(&mock_man->lock); 145 - drm_buddy_fini(&mock_man->mm); 145 + gpu_buddy_fini(&mock_man->mm); 146 146 mutex_unlock(&mock_man->lock); 147 147 148 148 ttm_set_driver_manager(bdev, mem_type, NULL);
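The mock manager is close to the smallest complete consumer of the renamed API, so the whole lifecycle is visible in one place. As a hedged end-to-end sketch (the sizes and example_buddy_lifecycle() are illustrative only; the calls and flags are the ones this commit renames):

#include <linux/gpu_buddy.h>
#include <linux/list.h>
#include <linux/sizes.h>

/* init -> alloc -> free -> fini under the new gpu_buddy names. */
static int example_buddy_lifecycle(void)
{
	struct gpu_buddy mm;
	LIST_HEAD(blocks);
	int err;

	err = gpu_buddy_init(&mm, SZ_1G, SZ_4K);	/* 1 GiB space, 4 KiB chunks */
	if (err)
		return err;

	/* One 64 KiB allocation anywhere in the space, requested clear. */
	err = gpu_buddy_alloc_blocks(&mm, 0, SZ_1G, SZ_64K, SZ_4K, &blocks,
				     GPU_BUDDY_CLEAR_ALLOCATION);
	if (!err)
		gpu_buddy_free_list(&mm, &blocks, GPU_BUDDY_CLEARED);

	gpu_buddy_fini(&mm);
	return err;
}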
+1 -1
drivers/gpu/drm/ttm/tests/ttm_mock_manager.h
··· 9 9 10 10 struct ttm_mock_manager { 11 11 struct ttm_resource_manager man; 12 - struct drm_buddy mm; 12 + struct gpu_buddy mm; 13 13 u64 default_page_size; 14 14 /* protects allocations of mock buffer objects */ 15 15 struct mutex lock;
+17 -17
drivers/gpu/drm/xe/xe_res_cursor.h
··· 58 58 /** @dma_addr: Current element in a struct drm_pagemap_addr array */ 59 59 const struct drm_pagemap_addr *dma_addr; 60 60 /** @mm: Buddy allocator for VRAM cursor */ 61 - struct drm_buddy *mm; 61 + struct gpu_buddy *mm; 62 62 /** 63 63 * @dma_start: DMA start address for the current segment. 64 64 * This may be different to @dma_addr.addr since elements in ··· 69 69 u64 dma_seg_size; 70 70 }; 71 71 72 - static struct drm_buddy *xe_res_get_buddy(struct ttm_resource *res) 72 + static struct gpu_buddy *xe_res_get_buddy(struct ttm_resource *res) 73 73 { 74 74 struct ttm_resource_manager *mgr; 75 75 ··· 104 104 case XE_PL_STOLEN: 105 105 case XE_PL_VRAM0: 106 106 case XE_PL_VRAM1: { 107 - struct drm_buddy_block *block; 107 + struct gpu_buddy_block *block; 108 108 struct list_head *head, *next; 109 - struct drm_buddy *mm = xe_res_get_buddy(res); 109 + struct gpu_buddy *mm = xe_res_get_buddy(res); 110 110 111 111 head = &to_xe_ttm_vram_mgr_resource(res)->blocks; 112 112 113 113 block = list_first_entry_or_null(head, 114 - struct drm_buddy_block, 114 + struct gpu_buddy_block, 115 115 link); 116 116 if (!block) 117 117 goto fallback; 118 118 119 - while (start >= drm_buddy_block_size(mm, block)) { 120 - start -= drm_buddy_block_size(mm, block); 119 + while (start >= gpu_buddy_block_size(mm, block)) { 120 + start -= gpu_buddy_block_size(mm, block); 121 121 122 122 next = block->link.next; 123 123 if (next != head) 124 - block = list_entry(next, struct drm_buddy_block, 124 + block = list_entry(next, struct gpu_buddy_block, 125 125 link); 126 126 } 127 127 128 128 cur->mm = mm; 129 - cur->start = drm_buddy_block_offset(block) + start; 130 - cur->size = min(drm_buddy_block_size(mm, block) - start, 129 + cur->start = gpu_buddy_block_offset(block) + start; 130 + cur->size = min(gpu_buddy_block_size(mm, block) - start, 131 131 size); 132 132 cur->remaining = size; 133 133 cur->node = block; ··· 259 259 */ 260 260 static inline void xe_res_next(struct xe_res_cursor *cur, u64 size) 261 261 { 262 - struct drm_buddy_block *block; 262 + struct gpu_buddy_block *block; 263 263 struct list_head *next; 264 264 u64 start; 265 265 ··· 295 295 block = cur->node; 296 296 297 297 next = block->link.next; 298 - block = list_entry(next, struct drm_buddy_block, link); 298 + block = list_entry(next, struct gpu_buddy_block, link); 299 299 300 300 301 - while (start >= drm_buddy_block_size(cur->mm, block)) { 302 - start -= drm_buddy_block_size(cur->mm, block); 301 + while (start >= gpu_buddy_block_size(cur->mm, block)) { 302 + start -= gpu_buddy_block_size(cur->mm, block); 303 303 304 304 next = block->link.next; 305 - block = list_entry(next, struct drm_buddy_block, link); 305 + block = list_entry(next, struct gpu_buddy_block, link); 306 306 } 307 307 308 - cur->start = drm_buddy_block_offset(block) + start; 309 - cur->size = min(drm_buddy_block_size(cur->mm, block) - start, 308 + cur->start = gpu_buddy_block_offset(block) + start; 309 + cur->size = min(gpu_buddy_block_size(cur->mm, block) - start, 310 310 cur->remaining); 311 311 cur->node = block; 312 312 break;
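The cursor hides the gpu_buddy_block list walk from callers entirely. Intended usage looks roughly like the sketch below — the loop body is hypothetical, and 'res' is assumed to be a VRAM-backed struct ttm_resource:

struct xe_res_cursor cur;

/* Step through the resource one page at a time; cur.size reports how
 * much of the current block remains contiguous from cur.start. */
for (xe_res_first(res, 0, res->size, &cur); cur.remaining;
     xe_res_next(&cur, PAGE_SIZE))
	pr_debug("VRAM chunk at %#llx, %llu contiguous bytes\n",
		 cur.start, cur.size);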
+6 -6
drivers/gpu/drm/xe/xe_svm.c
··· 747 747 return PHYS_PFN(offset + xpagemap->hpa_base); 748 748 } 749 749 750 - static struct drm_buddy *vram_to_buddy(struct xe_vram_region *vram) 750 + static struct gpu_buddy *vram_to_buddy(struct xe_vram_region *vram) 751 751 { 752 752 return &vram->ttm.mm; 753 753 } ··· 758 758 struct xe_bo *bo = to_xe_bo(devmem_allocation); 759 759 struct ttm_resource *res = bo->ttm.resource; 760 760 struct list_head *blocks = &to_xe_ttm_vram_mgr_resource(res)->blocks; 761 - struct drm_buddy_block *block; 761 + struct gpu_buddy_block *block; 762 762 int j = 0; 763 763 764 764 list_for_each_entry(block, blocks, link) { 765 765 struct xe_vram_region *vr = block->private; 766 - struct drm_buddy *buddy = vram_to_buddy(vr); 766 + struct gpu_buddy *buddy = vram_to_buddy(vr); 767 767 u64 block_pfn = block_offset_to_pfn(devmem_allocation->dpagemap, 768 - drm_buddy_block_offset(block)); 768 + gpu_buddy_block_offset(block)); 769 769 int i; 770 770 771 - for (i = 0; i < drm_buddy_block_size(buddy, block) >> PAGE_SHIFT; ++i) 771 + for (i = 0; i < gpu_buddy_block_size(buddy, block) >> PAGE_SHIFT; ++i) 772 772 pfn[j++] = block_pfn + i; 773 773 } 774 774 ··· 1033 1033 struct dma_fence *pre_migrate_fence = NULL; 1034 1034 struct xe_device *xe = vr->xe; 1035 1035 struct device *dev = xe->drm.dev; 1036 - struct drm_buddy_block *block; 1036 + struct gpu_buddy_block *block; 1037 1037 struct xe_validation_ctx vctx; 1038 1038 struct list_head *blocks; 1039 1039 struct drm_exec exec;
+36 -35
drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
··· 6 6 7 7 #include <drm/drm_managed.h> 8 8 #include <drm/drm_drv.h> 9 + #include <drm/drm_buddy.h> 9 10 10 11 #include <drm/ttm/ttm_placement.h> 11 12 #include <drm/ttm/ttm_range_manager.h> ··· 17 16 #include "xe_ttm_vram_mgr.h" 18 17 #include "xe_vram_types.h" 19 18 20 - static inline struct drm_buddy_block * 19 + static inline struct gpu_buddy_block * 21 20 xe_ttm_vram_mgr_first_block(struct list_head *list) 22 21 { 23 - return list_first_entry_or_null(list, struct drm_buddy_block, link); 22 + return list_first_entry_or_null(list, struct gpu_buddy_block, link); 24 23 } 25 24 26 - static inline bool xe_is_vram_mgr_blocks_contiguous(struct drm_buddy *mm, 25 + static inline bool xe_is_vram_mgr_blocks_contiguous(struct gpu_buddy *mm, 27 26 struct list_head *head) 28 27 { 29 - struct drm_buddy_block *block; 28 + struct gpu_buddy_block *block; 30 29 u64 start, size; 31 30 32 31 block = xe_ttm_vram_mgr_first_block(head); ··· 34 33 return false; 35 34 36 35 while (head != block->link.next) { 37 - start = drm_buddy_block_offset(block); 38 - size = drm_buddy_block_size(mm, block); 36 + start = gpu_buddy_block_offset(block); 37 + size = gpu_buddy_block_size(mm, block); 39 38 40 - block = list_entry(block->link.next, struct drm_buddy_block, 39 + block = list_entry(block->link.next, struct gpu_buddy_block, 41 40 link); 42 - if (start + size != drm_buddy_block_offset(block)) 41 + if (start + size != gpu_buddy_block_offset(block)) 43 42 return false; 44 43 } 45 44 ··· 53 52 { 54 53 struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man); 55 54 struct xe_ttm_vram_mgr_resource *vres; 56 - struct drm_buddy *mm = &mgr->mm; 55 + struct gpu_buddy *mm = &mgr->mm; 57 56 u64 size, min_page_size; 58 57 unsigned long lpfn; 59 58 int err; ··· 80 79 INIT_LIST_HEAD(&vres->blocks); 81 80 82 81 if (place->flags & TTM_PL_FLAG_TOPDOWN) 83 - vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION; 82 + vres->flags |= GPU_BUDDY_TOPDOWN_ALLOCATION; 84 83 85 84 if (place->fpfn || lpfn != man->size >> PAGE_SHIFT) 86 - vres->flags |= DRM_BUDDY_RANGE_ALLOCATION; 85 + vres->flags |= GPU_BUDDY_RANGE_ALLOCATION; 87 86 88 87 if (WARN_ON(!vres->base.size)) { 89 88 err = -EINVAL; ··· 119 118 lpfn = max_t(unsigned long, place->fpfn + (size >> PAGE_SHIFT), lpfn); 120 119 } 121 120 122 - err = drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT, 121 + err = gpu_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT, 123 122 (u64)lpfn << PAGE_SHIFT, size, 124 123 min_page_size, &vres->blocks, vres->flags); 125 124 if (err) 126 125 goto error_unlock; 127 126 128 127 if (place->flags & TTM_PL_FLAG_CONTIGUOUS) { 129 - if (!drm_buddy_block_trim(mm, NULL, vres->base.size, &vres->blocks)) 128 + if (!gpu_buddy_block_trim(mm, NULL, vres->base.size, &vres->blocks)) 130 129 size = vres->base.size; 131 130 } 132 131 133 132 if (lpfn <= mgr->visible_size >> PAGE_SHIFT) { 134 133 vres->used_visible_size = size; 135 134 } else { 136 - struct drm_buddy_block *block; 135 + struct gpu_buddy_block *block; 137 136 138 137 list_for_each_entry(block, &vres->blocks, link) { 139 - u64 start = drm_buddy_block_offset(block); 138 + u64 start = gpu_buddy_block_offset(block); 140 139 141 140 if (start < mgr->visible_size) { 142 - u64 end = start + drm_buddy_block_size(mm, block); 141 + u64 end = start + gpu_buddy_block_size(mm, block); 143 142 144 143 vres->used_visible_size += 145 144 min(end, mgr->visible_size) - start; ··· 159 158 * the object. 
160 159 */ 161 160 if (vres->base.placement & TTM_PL_FLAG_CONTIGUOUS) { 162 - struct drm_buddy_block *block = list_first_entry(&vres->blocks, 161 + struct gpu_buddy_block *block = list_first_entry(&vres->blocks, 163 162 typeof(*block), 164 163 link); 165 164 166 - vres->base.start = drm_buddy_block_offset(block) >> PAGE_SHIFT; 165 + vres->base.start = gpu_buddy_block_offset(block) >> PAGE_SHIFT; 167 166 } else { 168 167 vres->base.start = XE_BO_INVALID_OFFSET; 169 168 } ··· 185 184 struct xe_ttm_vram_mgr_resource *vres = 186 185 to_xe_ttm_vram_mgr_resource(res); 187 186 struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man); 188 - struct drm_buddy *mm = &mgr->mm; 187 + struct gpu_buddy *mm = &mgr->mm; 189 188 190 189 mutex_lock(&mgr->lock); 191 - drm_buddy_free_list(mm, &vres->blocks, 0); 190 + gpu_buddy_free_list(mm, &vres->blocks, 0); 192 191 mgr->visible_avail += vres->used_visible_size; 193 192 mutex_unlock(&mgr->lock); 194 193 ··· 201 200 struct drm_printer *printer) 202 201 { 203 202 struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man); 204 - struct drm_buddy *mm = &mgr->mm; 203 + struct gpu_buddy *mm = &mgr->mm; 205 204 206 205 mutex_lock(&mgr->lock); 207 206 drm_printf(printer, "default_page_size: %lluKiB\n", ··· 224 223 struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man); 225 224 struct xe_ttm_vram_mgr_resource *vres = 226 225 to_xe_ttm_vram_mgr_resource(res); 227 - struct drm_buddy *mm = &mgr->mm; 228 - struct drm_buddy_block *block; 226 + struct gpu_buddy *mm = &mgr->mm; 227 + struct gpu_buddy_block *block; 229 228 230 229 if (!place->fpfn && !place->lpfn) 231 230 return true; ··· 235 234 236 235 list_for_each_entry(block, &vres->blocks, link) { 237 236 unsigned long fpfn = 238 - drm_buddy_block_offset(block) >> PAGE_SHIFT; 237 + gpu_buddy_block_offset(block) >> PAGE_SHIFT; 239 238 unsigned long lpfn = fpfn + 240 - (drm_buddy_block_size(mm, block) >> PAGE_SHIFT); 239 + (gpu_buddy_block_size(mm, block) >> PAGE_SHIFT); 241 240 242 241 if (place->fpfn < lpfn && place->lpfn > fpfn) 243 242 return true; ··· 254 253 struct xe_ttm_vram_mgr *mgr = to_xe_ttm_vram_mgr(man); 255 254 struct xe_ttm_vram_mgr_resource *vres = 256 255 to_xe_ttm_vram_mgr_resource(res); 257 - struct drm_buddy *mm = &mgr->mm; 258 - struct drm_buddy_block *block; 256 + struct gpu_buddy *mm = &mgr->mm; 257 + struct gpu_buddy_block *block; 259 258 260 259 if (!place->fpfn && !place->lpfn) 261 260 return true; ··· 265 264 266 265 list_for_each_entry(block, &vres->blocks, link) { 267 266 unsigned long fpfn = 268 - drm_buddy_block_offset(block) >> PAGE_SHIFT; 267 + gpu_buddy_block_offset(block) >> PAGE_SHIFT; 269 268 unsigned long lpfn = fpfn + 270 - (drm_buddy_block_size(mm, block) >> PAGE_SHIFT); 269 + (gpu_buddy_block_size(mm, block) >> PAGE_SHIFT); 271 270 272 271 if (fpfn < place->fpfn || lpfn > place->lpfn) 273 272 return false; ··· 297 296 298 297 WARN_ON_ONCE(mgr->visible_avail != mgr->visible_size); 299 298 300 - drm_buddy_fini(&mgr->mm); 299 + gpu_buddy_fini(&mgr->mm); 301 300 302 301 ttm_resource_manager_cleanup(&mgr->manager); 303 302 ··· 328 327 mgr->visible_avail = io_size; 329 328 330 329 ttm_resource_manager_init(man, &xe->ttm, size); 331 - err = drm_buddy_init(&mgr->mm, man->size, default_page_size); 330 + err = gpu_buddy_init(&mgr->mm, man->size, default_page_size); 332 331 if (err) 333 332 return err; 334 333 ··· 376 375 if (!*sgt) 377 376 return -ENOMEM; 378 377 379 - /* Determine the number of DRM_BUDDY blocks to export */ 378 + /* Determine the number of GPU_BUDDY blocks to export */ 380 379 
xe_res_first(res, offset, length, &cursor); 381 380 while (cursor.remaining) { 382 381 num_entries++; ··· 393 392 sg->length = 0; 394 393 395 394 /* 396 - * Walk down DRM_BUDDY blocks to populate scatterlist nodes 397 - * @note: Use iterator api to get first the DRM_BUDDY block 395 + * Walk down GPU_BUDDY blocks to populate scatterlist nodes 396 + * @note: Use iterator api to get first the GPU_BUDDY block 398 397 * and the number of bytes from it. Access the following 399 - * DRM_BUDDY block(s) if more buffer needs to exported 398 + * GPU_BUDDY block(s) if more buffer needs to be exported 400 399 */ 401 400 xe_res_first(res, offset, length, &cursor); 402 401 for_each_sgtable_sg((*sgt), sg, i) {
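Both the amdgpu and xe managers above use gpu_buddy_block_trim() to hand back the rounded-up tail of a contiguous allocation. In isolation the pattern is roughly the following sketch (example_trim_to_size() is a hypothetical name; a NULL start keeps the block's current offset, as in the xe call above):

/* Shrink a contiguous allocation on 'blocks' back to 'size' bytes,
 * returning the trimmed tail to the allocator; 0 on success. */
static int example_trim_to_size(struct gpu_buddy *mm, u64 size,
				struct list_head *blocks)
{
	return gpu_buddy_block_trim(mm, NULL, size, blocks);
}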
+1 -1
drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h
··· 18 18 /** @manager: Base TTM resource manager */ 19 19 struct ttm_resource_manager manager; 20 20 /** @mm: DRM buddy allocator which manages the VRAM */ 21 - struct drm_buddy mm; 21 + struct gpu_buddy mm; 22 22 /** @visible_size: Probed size of the CPU visible portion */ 23 23 u64 visible_size; 24 24 /** @visible_avail: CPU visible portion still unallocated */
+1 -1
drivers/gpu/tests/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 3 3 gpu_buddy_tests-y = gpu_buddy_test.o gpu_random.o 4 - obj-$(CONFIG_DRM_KUNIT_TEST) += gpu_buddy_tests.o 4 + obj-$(CONFIG_GPU_BUDDY_KUNIT_TEST) += gpu_buddy_tests.o
+206 -206
drivers/gpu/tests/gpu_buddy_test.c
··· 21 21 return (1 << order) * chunk_size; 22 22 } 23 23 24 - static void drm_test_buddy_fragmentation_performance(struct kunit *test) 24 + static void gpu_test_buddy_fragmentation_performance(struct kunit *test) 25 25 { 26 - struct drm_buddy_block *block, *tmp; 26 + struct gpu_buddy_block *block, *tmp; 27 27 int num_blocks, i, ret, count = 0; 28 28 LIST_HEAD(allocated_blocks); 29 29 unsigned long elapsed_ms; ··· 32 32 LIST_HEAD(clear_list); 33 33 LIST_HEAD(dirty_list); 34 34 LIST_HEAD(free_list); 35 - struct drm_buddy mm; 35 + struct gpu_buddy mm; 36 36 u64 mm_size = SZ_4G; 37 37 ktime_t start, end; 38 38 ··· 47 47 * quickly the allocator can satisfy larger, aligned requests from a pool of 48 48 * highly fragmented space. 49 49 */ 50 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K), 50 + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_init(&mm, mm_size, SZ_4K), 51 51 "buddy_init failed\n"); 52 52 53 53 num_blocks = mm_size / SZ_64K; ··· 55 55 start = ktime_get(); 56 56 /* Allocate with maximum fragmentation - 8K blocks with 64K alignment */ 57 57 for (i = 0; i < num_blocks; i++) 58 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, SZ_8K, SZ_64K, 58 + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, SZ_8K, SZ_64K, 59 59 &allocated_blocks, 0), 60 60 "buddy_alloc hit an error size=%u\n", SZ_8K); 61 61 ··· 68 68 } 69 69 70 70 /* Free with different flags to ensure no coalescing */ 71 - drm_buddy_free_list(&mm, &clear_list, DRM_BUDDY_CLEARED); 72 - drm_buddy_free_list(&mm, &dirty_list, 0); 71 + gpu_buddy_free_list(&mm, &clear_list, GPU_BUDDY_CLEARED); 72 + gpu_buddy_free_list(&mm, &dirty_list, 0); 73 73 74 74 for (i = 0; i < num_blocks; i++) 75 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, SZ_64K, SZ_64K, 75 + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, SZ_64K, SZ_64K, 76 76 &test_blocks, 0), 77 77 "buddy_alloc hit an error size=%u\n", SZ_64K); 78 - drm_buddy_free_list(&mm, &test_blocks, 0); 78 + gpu_buddy_free_list(&mm, &test_blocks, 0); 79 79 80 80 end = ktime_get(); 81 81 elapsed_ms = ktime_to_ms(ktime_sub(end, start)); 82 82 83 83 kunit_info(test, "Fragmented allocation took %lu ms\n", elapsed_ms); 84 84 85 - drm_buddy_fini(&mm); 85 + gpu_buddy_fini(&mm); 86 86 87 87 /* 88 88 * Reverse free order under fragmentation ··· 96 96 * deallocation occurs in the opposite order of allocation, exposing the 97 97 * cost difference between a linear freelist scan and an ordered tree lookup. 
98 98 */ 99 - ret = drm_buddy_init(&mm, mm_size, SZ_4K); 99 + ret = gpu_buddy_init(&mm, mm_size, SZ_4K); 100 100 KUNIT_ASSERT_EQ(test, ret, 0); 101 101 102 102 start = ktime_get(); 103 103 /* Allocate maximum fragmentation */ 104 104 for (i = 0; i < num_blocks; i++) 105 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, SZ_8K, SZ_64K, 105 + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, SZ_8K, SZ_64K, 106 106 &allocated_blocks, 0), 107 107 "buddy_alloc hit an error size=%u\n", SZ_8K); 108 108 ··· 111 111 list_move_tail(&block->link, &free_list); 112 112 count++; 113 113 } 114 - drm_buddy_free_list(&mm, &free_list, DRM_BUDDY_CLEARED); 114 + gpu_buddy_free_list(&mm, &free_list, GPU_BUDDY_CLEARED); 115 115 116 116 list_for_each_entry_safe_reverse(block, tmp, &allocated_blocks, link) 117 117 list_move(&block->link, &reverse_list); 118 - drm_buddy_free_list(&mm, &reverse_list, DRM_BUDDY_CLEARED); 118 + gpu_buddy_free_list(&mm, &reverse_list, GPU_BUDDY_CLEARED); 119 119 120 120 end = ktime_get(); 121 121 elapsed_ms = ktime_to_ms(ktime_sub(end, start)); 122 122 123 123 kunit_info(test, "Reverse-ordered free took %lu ms\n", elapsed_ms); 124 124 125 - drm_buddy_fini(&mm); 125 + gpu_buddy_fini(&mm); 126 126 } 127 127 128 - static void drm_test_buddy_alloc_range_bias(struct kunit *test) 128 + static void gpu_test_buddy_alloc_range_bias(struct kunit *test) 129 129 { 130 130 u32 mm_size, size, ps, bias_size, bias_start, bias_end, bias_rem; 131 - DRM_RND_STATE(prng, random_seed); 131 + GPU_RND_STATE(prng, random_seed); 132 132 unsigned int i, count, *order; 133 - struct drm_buddy_block *block; 133 + struct gpu_buddy_block *block; 134 134 unsigned long flags; 135 - struct drm_buddy mm; 135 + struct gpu_buddy mm; 136 136 LIST_HEAD(allocated); 137 137 138 138 bias_size = SZ_1M; ··· 142 142 143 143 kunit_info(test, "mm_size=%u, ps=%u\n", mm_size, ps); 144 144 145 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps), 145 + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_init(&mm, mm_size, ps), 146 146 "buddy_init failed\n"); 147 147 148 148 count = mm_size / bias_size; 149 - order = drm_random_order(count, &prng); 149 + order = gpu_random_order(count, &prng); 150 150 KUNIT_EXPECT_TRUE(test, order); 151 151 152 152 /* ··· 166 166 167 167 /* internal round_up too big */ 168 168 KUNIT_ASSERT_TRUE_MSG(test, 169 - drm_buddy_alloc_blocks(&mm, bias_start, 169 + gpu_buddy_alloc_blocks(&mm, bias_start, 170 170 bias_end, bias_size + ps, bias_size, 171 171 &allocated, 172 - DRM_BUDDY_RANGE_ALLOCATION), 172 + GPU_BUDDY_RANGE_ALLOCATION), 173 173 "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n", 174 174 bias_start, bias_end, bias_size, bias_size); 175 175 176 176 /* size too big */ 177 177 KUNIT_ASSERT_TRUE_MSG(test, 178 - drm_buddy_alloc_blocks(&mm, bias_start, 178 + gpu_buddy_alloc_blocks(&mm, bias_start, 179 179 bias_end, bias_size + ps, ps, 180 180 &allocated, 181 - DRM_BUDDY_RANGE_ALLOCATION), 181 + GPU_BUDDY_RANGE_ALLOCATION), 182 182 "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n", 183 183 bias_start, bias_end, bias_size + ps, ps); 184 184 185 185 /* bias range too small for size */ 186 186 KUNIT_ASSERT_TRUE_MSG(test, 187 - drm_buddy_alloc_blocks(&mm, bias_start + ps, 187 + gpu_buddy_alloc_blocks(&mm, bias_start + ps, 188 188 bias_end, bias_size, ps, 189 189 &allocated, 190 - DRM_BUDDY_RANGE_ALLOCATION), 190 + GPU_BUDDY_RANGE_ALLOCATION), 191 191 "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n", 192 192 bias_start + ps, bias_end, 
bias_size, ps); 193 193 194 194 /* bias misaligned */ 195 195 KUNIT_ASSERT_TRUE_MSG(test, 196 - drm_buddy_alloc_blocks(&mm, bias_start + ps, 196 + gpu_buddy_alloc_blocks(&mm, bias_start + ps, 197 197 bias_end - ps, 198 198 bias_size >> 1, bias_size >> 1, 199 199 &allocated, 200 - DRM_BUDDY_RANGE_ALLOCATION), 200 + GPU_BUDDY_RANGE_ALLOCATION), 201 201 "buddy_alloc h didn't fail with bias(%x-%x), size=%u, ps=%u\n", 202 202 bias_start + ps, bias_end - ps, bias_size >> 1, bias_size >> 1); 203 203 204 204 /* single big page */ 205 205 KUNIT_ASSERT_FALSE_MSG(test, 206 - drm_buddy_alloc_blocks(&mm, bias_start, 206 + gpu_buddy_alloc_blocks(&mm, bias_start, 207 207 bias_end, bias_size, bias_size, 208 208 &tmp, 209 - DRM_BUDDY_RANGE_ALLOCATION), 209 + GPU_BUDDY_RANGE_ALLOCATION), 210 210 "buddy_alloc i failed with bias(%x-%x), size=%u, ps=%u\n", 211 211 bias_start, bias_end, bias_size, bias_size); 212 - drm_buddy_free_list(&mm, &tmp, 0); 212 + gpu_buddy_free_list(&mm, &tmp, 0); 213 213 214 214 /* single page with internal round_up */ 215 215 KUNIT_ASSERT_FALSE_MSG(test, 216 - drm_buddy_alloc_blocks(&mm, bias_start, 216 + gpu_buddy_alloc_blocks(&mm, bias_start, 217 217 bias_end, ps, bias_size, 218 218 &tmp, 219 - DRM_BUDDY_RANGE_ALLOCATION), 219 + GPU_BUDDY_RANGE_ALLOCATION), 220 220 "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n", 221 221 bias_start, bias_end, ps, bias_size); 222 - drm_buddy_free_list(&mm, &tmp, 0); 222 + gpu_buddy_free_list(&mm, &tmp, 0); 223 223 224 224 /* random size within */ 225 225 size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps); 226 226 if (size) 227 227 KUNIT_ASSERT_FALSE_MSG(test, 228 - drm_buddy_alloc_blocks(&mm, bias_start, 228 + gpu_buddy_alloc_blocks(&mm, bias_start, 229 229 bias_end, size, ps, 230 230 &tmp, 231 - DRM_BUDDY_RANGE_ALLOCATION), 231 + GPU_BUDDY_RANGE_ALLOCATION), 232 232 "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n", 233 233 bias_start, bias_end, size, ps); 234 234 235 235 bias_rem -= size; 236 236 /* too big for current avail */ 237 237 KUNIT_ASSERT_TRUE_MSG(test, 238 - drm_buddy_alloc_blocks(&mm, bias_start, 238 + gpu_buddy_alloc_blocks(&mm, bias_start, 239 239 bias_end, bias_rem + ps, ps, 240 240 &allocated, 241 - DRM_BUDDY_RANGE_ALLOCATION), 241 + GPU_BUDDY_RANGE_ALLOCATION), 242 242 "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n", 243 243 bias_start, bias_end, bias_rem + ps, ps); 244 244 ··· 248 248 size = max(size, ps); 249 249 250 250 KUNIT_ASSERT_FALSE_MSG(test, 251 - drm_buddy_alloc_blocks(&mm, bias_start, 251 + gpu_buddy_alloc_blocks(&mm, bias_start, 252 252 bias_end, size, ps, 253 253 &allocated, 254 - DRM_BUDDY_RANGE_ALLOCATION), 254 + GPU_BUDDY_RANGE_ALLOCATION), 255 255 "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n", 256 256 bias_start, bias_end, size, ps); 257 257 /* ··· 259 259 * unallocated, and ideally not always on the bias 260 260 * boundaries. 261 261 */ 262 - drm_buddy_free_list(&mm, &tmp, 0); 262 + gpu_buddy_free_list(&mm, &tmp, 0); 263 263 } else { 264 264 list_splice_tail(&tmp, &allocated); 265 265 } 266 266 } 267 267 268 268 kfree(order); 269 - drm_buddy_free_list(&mm, &allocated, 0); 270 - drm_buddy_fini(&mm); 269 + gpu_buddy_free_list(&mm, &allocated, 0); 270 + gpu_buddy_fini(&mm); 271 271 272 272 /* 273 273 * Something more free-form. Idea is to pick a random starting bias ··· 278 278 * allocated nodes in the middle of the address space. 
279 279 */ 280 280 281 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps), 281 + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_init(&mm, mm_size, ps), 282 282 "buddy_init failed\n"); 283 283 284 284 bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps); ··· 290 290 u32 size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps); 291 291 292 292 KUNIT_ASSERT_FALSE_MSG(test, 293 - drm_buddy_alloc_blocks(&mm, bias_start, 293 + gpu_buddy_alloc_blocks(&mm, bias_start, 294 294 bias_end, size, ps, 295 295 &allocated, 296 - DRM_BUDDY_RANGE_ALLOCATION), 296 + GPU_BUDDY_RANGE_ALLOCATION), 297 297 "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n", 298 298 bias_start, bias_end, size, ps); 299 299 bias_rem -= size; ··· 319 319 KUNIT_ASSERT_EQ(test, bias_start, 0); 320 320 KUNIT_ASSERT_EQ(test, bias_end, mm_size); 321 321 KUNIT_ASSERT_TRUE_MSG(test, 322 - drm_buddy_alloc_blocks(&mm, bias_start, bias_end, 322 + gpu_buddy_alloc_blocks(&mm, bias_start, bias_end, 323 323 ps, ps, 324 324 &allocated, 325 - DRM_BUDDY_RANGE_ALLOCATION), 325 + GPU_BUDDY_RANGE_ALLOCATION), 326 326 "buddy_alloc passed with bias(%x-%x), size=%u\n", 327 327 bias_start, bias_end, ps); 328 328 329 - drm_buddy_free_list(&mm, &allocated, 0); 330 - drm_buddy_fini(&mm); 329 + gpu_buddy_free_list(&mm, &allocated, 0); 330 + gpu_buddy_fini(&mm); 331 331 332 332 /* 333 - * Allocate cleared blocks in the bias range when the DRM buddy's clear avail is 333 + * Allocate cleared blocks in the bias range when the GPU buddy's clear avail is 334 334 * zero. This will validate the bias range allocation in scenarios like system boot 335 335 * when no cleared blocks are available and exercise the fallback path too. The resulting 336 336 * blocks should always be dirty. 337 337 */ 338 338 339 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps), 339 + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_init(&mm, mm_size, ps), 340 340 "buddy_init failed\n"); 341 341 342 342 bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps); ··· 344 344 bias_end = max(bias_end, bias_start + ps); 345 345 bias_rem = bias_end - bias_start; 346 346 347 - flags = DRM_BUDDY_CLEAR_ALLOCATION | DRM_BUDDY_RANGE_ALLOCATION; 347 + flags = GPU_BUDDY_CLEAR_ALLOCATION | GPU_BUDDY_RANGE_ALLOCATION; 348 348 size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps); 349 349 350 350 KUNIT_ASSERT_FALSE_MSG(test, 351 - drm_buddy_alloc_blocks(&mm, bias_start, 351 + gpu_buddy_alloc_blocks(&mm, bias_start, 352 352 bias_end, size, ps, 353 353 &allocated, 354 354 flags), ··· 356 356 bias_start, bias_end, size, ps); 357 357 358 358 list_for_each_entry(block, &allocated, link) 359 - KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false); 359 + KUNIT_EXPECT_EQ(test, gpu_buddy_block_is_clear(block), false); 360 360 361 - drm_buddy_free_list(&mm, &allocated, 0); 362 - drm_buddy_fini(&mm); 361 + gpu_buddy_free_list(&mm, &allocated, 0); 362 + gpu_buddy_fini(&mm); 363 363 } 364 364 365 - static void drm_test_buddy_alloc_clear(struct kunit *test) 365 + static void gpu_test_buddy_alloc_clear(struct kunit *test) 366 366 { 367 367 unsigned long n_pages, total, i = 0; 368 368 const unsigned long ps = SZ_4K; 369 - struct drm_buddy_block *block; 369 + struct gpu_buddy_block *block; 370 370 const int max_order = 12; 371 371 LIST_HEAD(allocated); 372 - struct drm_buddy mm; 372 + struct gpu_buddy mm; 373 373 unsigned int order; 374 374 u32 mm_size, size; 375 375 LIST_HEAD(dirty); 376 376 LIST_HEAD(clean); 377 377 378 378 mm_size = SZ_4K << max_order; 
379 - KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps)); 379 + KUNIT_EXPECT_FALSE(test, gpu_buddy_init(&mm, mm_size, ps)); 380 380 381 381 KUNIT_EXPECT_EQ(test, mm.max_order, max_order); 382 382 ··· 389 389 * is indeed all dirty pages and vice versa. Free it all again, 390 390 * keeping the dirty/clear status. 391 391 */ 392 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, 392 + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, 393 393 5 * ps, ps, &allocated, 394 - DRM_BUDDY_TOPDOWN_ALLOCATION), 394 + GPU_BUDDY_TOPDOWN_ALLOCATION), 395 395 "buddy_alloc hit an error size=%lu\n", 5 * ps); 396 - drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED); 396 + gpu_buddy_free_list(&mm, &allocated, GPU_BUDDY_CLEARED); 397 397 398 398 n_pages = 10; 399 399 do { ··· 406 406 flags = 0; 407 407 } else { 408 408 list = &clean; 409 - flags = DRM_BUDDY_CLEAR_ALLOCATION; 409 + flags = GPU_BUDDY_CLEAR_ALLOCATION; 410 410 } 411 411 412 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, 412 + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, 413 413 ps, ps, list, 414 414 flags), 415 415 "buddy_alloc hit an error size=%lu\n", ps); 416 416 } while (++i < n_pages); 417 417 418 418 list_for_each_entry(block, &clean, link) 419 - KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), true); 419 + KUNIT_EXPECT_EQ(test, gpu_buddy_block_is_clear(block), true); 420 420 421 421 list_for_each_entry(block, &dirty, link) 422 - KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false); 422 + KUNIT_EXPECT_EQ(test, gpu_buddy_block_is_clear(block), false); 423 423 424 - drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED); 424 + gpu_buddy_free_list(&mm, &clean, GPU_BUDDY_CLEARED); 425 425 426 426 /* 427 427 * Trying to go over the clear limit for some allocation. 428 428 * The allocation should never fail with reasonable page-size. 429 429 */ 430 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, 430 + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, 431 431 10 * ps, ps, &clean, 432 - DRM_BUDDY_CLEAR_ALLOCATION), 432 + GPU_BUDDY_CLEAR_ALLOCATION), 433 433 "buddy_alloc hit an error size=%lu\n", 10 * ps); 434 434 435 - drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED); 436 - drm_buddy_free_list(&mm, &dirty, 0); 437 - drm_buddy_fini(&mm); 435 + gpu_buddy_free_list(&mm, &clean, GPU_BUDDY_CLEARED); 436 + gpu_buddy_free_list(&mm, &dirty, 0); 437 + gpu_buddy_fini(&mm); 438 438 439 - KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps)); 439 + KUNIT_EXPECT_FALSE(test, gpu_buddy_init(&mm, mm_size, ps)); 440 440 441 441 /* 442 442 * Create a new mm. 
Intentionally fragment the address space by creating ··· 458 458 else 459 459 list = &clean; 460 460 461 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, 461 + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, 462 462 ps, ps, list, 0), 463 463 "buddy_alloc hit an error size=%lu\n", ps); 464 464 } while (++i < n_pages); 465 465 466 - drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED); 467 - drm_buddy_free_list(&mm, &dirty, 0); 466 + gpu_buddy_free_list(&mm, &clean, GPU_BUDDY_CLEARED); 467 + gpu_buddy_free_list(&mm, &dirty, 0); 468 468 469 469 order = 1; 470 470 do { 471 471 size = SZ_4K << order; 472 472 473 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, 473 + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, 474 474 size, size, &allocated, 475 - DRM_BUDDY_CLEAR_ALLOCATION), 475 + GPU_BUDDY_CLEAR_ALLOCATION), 476 476 "buddy_alloc hit an error size=%u\n", size); 477 477 total = 0; 478 478 list_for_each_entry(block, &allocated, link) { 479 479 if (size != mm_size) 480 - KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false); 481 - total += drm_buddy_block_size(&mm, block); 480 + KUNIT_EXPECT_EQ(test, gpu_buddy_block_is_clear(block), false); 481 + total += gpu_buddy_block_size(&mm, block); 482 482 } 483 483 KUNIT_EXPECT_EQ(test, total, size); 484 484 485 - drm_buddy_free_list(&mm, &allocated, 0); 485 + gpu_buddy_free_list(&mm, &allocated, 0); 486 486 } while (++order <= max_order); 487 487 488 - drm_buddy_fini(&mm); 488 + gpu_buddy_fini(&mm); 489 489 490 490 /* 491 491 * Create a new mm with a non power-of-two size. Allocate a random size from each ··· 494 494 */ 495 495 mm_size = (SZ_4K << max_order) + (SZ_4K << (max_order - 2)); 496 496 497 - KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps)); 497 + KUNIT_EXPECT_FALSE(test, gpu_buddy_init(&mm, mm_size, ps)); 498 498 KUNIT_EXPECT_EQ(test, mm.max_order, max_order); 499 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, SZ_4K << max_order, 499 + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, SZ_4K << max_order, 500 500 4 * ps, ps, &allocated, 501 - DRM_BUDDY_RANGE_ALLOCATION), 501 + GPU_BUDDY_RANGE_ALLOCATION), 502 502 "buddy_alloc hit an error size=%lu\n", 4 * ps); 503 - drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED); 504 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, SZ_4K << max_order, 503 + gpu_buddy_free_list(&mm, &allocated, GPU_BUDDY_CLEARED); 504 + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, SZ_4K << max_order, 505 505 2 * ps, ps, &allocated, 506 - DRM_BUDDY_CLEAR_ALLOCATION), 506 + GPU_BUDDY_CLEAR_ALLOCATION), 507 507 "buddy_alloc hit an error size=%lu\n", 2 * ps); 508 - drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED); 509 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, SZ_4K << max_order, mm_size, 508 + gpu_buddy_free_list(&mm, &allocated, GPU_BUDDY_CLEARED); 509 + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, SZ_4K << max_order, mm_size, 510 510 ps, ps, &allocated, 511 - DRM_BUDDY_RANGE_ALLOCATION), 511 + GPU_BUDDY_RANGE_ALLOCATION), 512 512 "buddy_alloc hit an error size=%lu\n", ps); 513 - drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED); 514 - drm_buddy_fini(&mm); 513 + gpu_buddy_free_list(&mm, &allocated, GPU_BUDDY_CLEARED); 514 + gpu_buddy_fini(&mm); 515 515 } 516 516 517 - static void drm_test_buddy_alloc_contiguous(struct kunit *test) 517 + static void gpu_test_buddy_alloc_contiguous(struct kunit *test) 518 518 { 519 519 const 
unsigned long ps = SZ_4K, mm_size = 16 * 3 * SZ_4K; 520 520 unsigned long i, n_pages, total; 521 - struct drm_buddy_block *block; 522 - struct drm_buddy mm; 521 + struct gpu_buddy_block *block; 522 + struct gpu_buddy mm; 523 523 LIST_HEAD(left); 524 524 LIST_HEAD(middle); 525 525 LIST_HEAD(right); 526 526 LIST_HEAD(allocated); 527 527 528 - KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps)); 528 + KUNIT_EXPECT_FALSE(test, gpu_buddy_init(&mm, mm_size, ps)); 529 529 530 530 /* 531 531 * Idea is to fragment the address space by alternating block 532 532 * allocations between three different lists; one for left, middle and 533 533 * right. We can then free a list to simulate fragmentation. In 534 - * particular we want to exercise the DRM_BUDDY_CONTIGUOUS_ALLOCATION, 534 + * particular we want to exercise the GPU_BUDDY_CONTIGUOUS_ALLOCATION, 535 535 * including the try_harder path. 536 536 */ 537 537 ··· 548 548 else 549 549 list = &right; 550 550 KUNIT_ASSERT_FALSE_MSG(test, 551 - drm_buddy_alloc_blocks(&mm, 0, mm_size, 551 + gpu_buddy_alloc_blocks(&mm, 0, mm_size, 552 552 ps, ps, list, 0), 553 553 "buddy_alloc hit an error size=%lu\n", 554 554 ps); 555 555 } while (++i < n_pages); 556 556 557 - KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, 557 + KUNIT_ASSERT_TRUE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, 558 558 3 * ps, ps, &allocated, 559 - DRM_BUDDY_CONTIGUOUS_ALLOCATION), 559 + GPU_BUDDY_CONTIGUOUS_ALLOCATION), 560 560 "buddy_alloc didn't error size=%lu\n", 3 * ps); 561 561 562 - drm_buddy_free_list(&mm, &middle, 0); 563 - KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, 562 + gpu_buddy_free_list(&mm, &middle, 0); 563 + KUNIT_ASSERT_TRUE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, 564 564 3 * ps, ps, &allocated, 565 - DRM_BUDDY_CONTIGUOUS_ALLOCATION), 565 + GPU_BUDDY_CONTIGUOUS_ALLOCATION), 566 566 "buddy_alloc didn't error size=%lu\n", 3 * ps); 567 - KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, 567 + KUNIT_ASSERT_TRUE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, 568 568 2 * ps, ps, &allocated, 569 - DRM_BUDDY_CONTIGUOUS_ALLOCATION), 569 + GPU_BUDDY_CONTIGUOUS_ALLOCATION), 570 570 "buddy_alloc didn't error size=%lu\n", 2 * ps); 571 571 572 - drm_buddy_free_list(&mm, &right, 0); 573 - KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, 572 + gpu_buddy_free_list(&mm, &right, 0); 573 + KUNIT_ASSERT_TRUE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, 574 574 3 * ps, ps, &allocated, 575 - DRM_BUDDY_CONTIGUOUS_ALLOCATION), 575 + GPU_BUDDY_CONTIGUOUS_ALLOCATION), 576 576 "buddy_alloc didn't error size=%lu\n", 3 * ps); 577 577 /* 578 578 * At this point we should have enough contiguous space for 2 blocks, 579 579 * however they are never buddies (since we freed middle and right) so 580 580 * will require the try_harder logic to find them. 
581 581 */ 582 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, 582 + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, 583 583 2 * ps, ps, &allocated, 584 - DRM_BUDDY_CONTIGUOUS_ALLOCATION), 584 + GPU_BUDDY_CONTIGUOUS_ALLOCATION), 585 585 "buddy_alloc hit an error size=%lu\n", 2 * ps); 586 586 587 - drm_buddy_free_list(&mm, &left, 0); 588 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, 587 + gpu_buddy_free_list(&mm, &left, 0); 588 + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, 589 589 3 * ps, ps, &allocated, 590 - DRM_BUDDY_CONTIGUOUS_ALLOCATION), 590 + GPU_BUDDY_CONTIGUOUS_ALLOCATION), 591 591 "buddy_alloc hit an error size=%lu\n", 3 * ps); 592 592 593 593 total = 0; 594 594 list_for_each_entry(block, &allocated, link) 595 - total += drm_buddy_block_size(&mm, block); 595 + total += gpu_buddy_block_size(&mm, block); 596 596 597 597 KUNIT_ASSERT_EQ(test, total, ps * 2 + ps * 3); 598 598 599 - drm_buddy_free_list(&mm, &allocated, 0); 600 - drm_buddy_fini(&mm); 599 + gpu_buddy_free_list(&mm, &allocated, 0); 600 + gpu_buddy_fini(&mm); 601 601 } 602 602 603 - static void drm_test_buddy_alloc_pathological(struct kunit *test) 603 + static void gpu_test_buddy_alloc_pathological(struct kunit *test) 604 604 { 605 605 u64 mm_size, size, start = 0; 606 - struct drm_buddy_block *block; 606 + struct gpu_buddy_block *block; 607 607 const int max_order = 3; 608 608 unsigned long flags = 0; 609 609 int order, top; 610 - struct drm_buddy mm; 610 + struct gpu_buddy mm; 611 611 LIST_HEAD(blocks); 612 612 LIST_HEAD(holes); 613 613 LIST_HEAD(tmp); ··· 620 620 */ 621 621 622 622 mm_size = SZ_4K << max_order; 623 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K), 623 + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_init(&mm, mm_size, SZ_4K), 624 624 "buddy_init failed\n"); 625 625 626 626 KUNIT_EXPECT_EQ(test, mm.max_order, max_order); ··· 630 630 block = list_first_entry_or_null(&blocks, typeof(*block), link); 631 631 if (block) { 632 632 list_del(&block->link); 633 - drm_buddy_free_block(&mm, block); 633 + gpu_buddy_free_block(&mm, block); 634 634 } 635 635 636 636 for (order = top; order--;) { 637 637 size = get_size(order, mm.chunk_size); 638 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, 638 + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, 639 639 mm_size, size, size, 640 640 &tmp, flags), 641 641 "buddy_alloc hit -ENOMEM with order=%d, top=%d\n", 642 642 order, top); 643 643 644 - block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link); 644 + block = list_first_entry_or_null(&tmp, struct gpu_buddy_block, link); 645 645 KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n"); 646 646 647 647 list_move_tail(&block->link, &blocks); ··· 649 649 650 650 /* There should be one final page for this sub-allocation */ 651 651 size = get_size(0, mm.chunk_size); 652 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, 652 + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, mm_size, 653 653 size, size, &tmp, flags), 654 654 "buddy_alloc hit -ENOMEM for hole\n"); 655 655 656 - block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link); 656 + block = list_first_entry_or_null(&tmp, struct gpu_buddy_block, link); 657 657 KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n"); 658 658 659 659 list_move_tail(&block->link, &holes); 660 660 661 661 size = get_size(top, mm.chunk_size); 662 - 
KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, 662 + KUNIT_ASSERT_TRUE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, mm_size, 663 663 size, size, &tmp, flags), 664 664 "buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!", 665 665 top, max_order); 666 666 } 667 667 668 - drm_buddy_free_list(&mm, &holes, 0); 668 + gpu_buddy_free_list(&mm, &holes, 0); 669 669 670 670 /* Nothing larger than blocks of chunk_size now available */ 671 671 for (order = 1; order <= max_order; order++) { 672 672 size = get_size(order, mm.chunk_size); 673 - KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, 673 + KUNIT_ASSERT_TRUE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, mm_size, 674 674 size, size, &tmp, flags), 675 675 "buddy_alloc unexpectedly succeeded at order %d, it should be full!", 676 676 order); 677 677 } 678 678 679 679 list_splice_tail(&holes, &blocks); 680 - drm_buddy_free_list(&mm, &blocks, 0); 681 - drm_buddy_fini(&mm); 680 + gpu_buddy_free_list(&mm, &blocks, 0); 681 + gpu_buddy_fini(&mm); 682 682 } 683 683 684 - static void drm_test_buddy_alloc_pessimistic(struct kunit *test) 684 + static void gpu_test_buddy_alloc_pessimistic(struct kunit *test) 685 685 { 686 686 u64 mm_size, size, start = 0; 687 - struct drm_buddy_block *block, *bn; 687 + struct gpu_buddy_block *block, *bn; 688 688 const unsigned int max_order = 16; 689 689 unsigned long flags = 0; 690 - struct drm_buddy mm; 690 + struct gpu_buddy mm; 691 691 unsigned int order; 692 692 LIST_HEAD(blocks); 693 693 LIST_HEAD(tmp); ··· 699 699 */ 700 700 701 701 mm_size = SZ_4K << max_order; 702 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K), 702 + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_init(&mm, mm_size, SZ_4K), 703 703 "buddy_init failed\n"); 704 704 705 705 KUNIT_EXPECT_EQ(test, mm.max_order, max_order); 706 706 707 707 for (order = 0; order < max_order; order++) { 708 708 size = get_size(order, mm.chunk_size); 709 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, 709 + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, mm_size, 710 710 size, size, &tmp, flags), 711 711 "buddy_alloc hit -ENOMEM with order=%d\n", 712 712 order); 713 713 714 - block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link); 714 + block = list_first_entry_or_null(&tmp, struct gpu_buddy_block, link); 715 715 KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n"); 716 716 717 717 list_move_tail(&block->link, &blocks); ··· 719 719 720 720 /* And now the last remaining block available */ 721 721 size = get_size(0, mm.chunk_size); 722 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, 722 + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, mm_size, 723 723 size, size, &tmp, flags), 724 724 "buddy_alloc hit -ENOMEM on final alloc\n"); 725 725 726 - block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link); 726 + block = list_first_entry_or_null(&tmp, struct gpu_buddy_block, link); 727 727 KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n"); 728 728 729 729 list_move_tail(&block->link, &blocks); ··· 731 731 /* Should be completely full! 
*/ 732 732 for (order = max_order; order--;) { 733 733 size = get_size(order, mm.chunk_size); 734 - KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, 734 + KUNIT_ASSERT_TRUE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, mm_size, 735 735 size, size, &tmp, flags), 736 736 "buddy_alloc unexpectedly succeeded, it should be full!"); 737 737 } 738 738 739 739 block = list_last_entry(&blocks, typeof(*block), link); 740 740 list_del(&block->link); 741 - drm_buddy_free_block(&mm, block); 741 + gpu_buddy_free_block(&mm, block); 742 742 743 743 /* As we free in increasing size, we make available larger blocks */ 744 744 order = 1; 745 745 list_for_each_entry_safe(block, bn, &blocks, link) { 746 746 list_del(&block->link); 747 - drm_buddy_free_block(&mm, block); 747 + gpu_buddy_free_block(&mm, block); 748 748 749 749 size = get_size(order, mm.chunk_size); 750 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, 750 + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, mm_size, 751 751 size, size, &tmp, flags), 752 752 "buddy_alloc hit -ENOMEM with order=%d\n", 753 753 order); 754 754 755 - block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link); 755 + block = list_first_entry_or_null(&tmp, struct gpu_buddy_block, link); 756 756 KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n"); 757 757 758 758 list_del(&block->link); 759 - drm_buddy_free_block(&mm, block); 759 + gpu_buddy_free_block(&mm, block); 760 760 order++; 761 761 } 762 762 763 763 /* To confirm, now the whole mm should be available */ 764 764 size = get_size(max_order, mm.chunk_size); 765 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, 765 + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, mm_size, 766 766 size, size, &tmp, flags), 767 767 "buddy_alloc (realloc) hit -ENOMEM with order=%d\n", 768 768 max_order); 769 769 770 - block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link); 770 + block = list_first_entry_or_null(&tmp, struct gpu_buddy_block, link); 771 771 KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n"); 772 772 773 773 list_del(&block->link); 774 - drm_buddy_free_block(&mm, block); 775 - drm_buddy_free_list(&mm, &blocks, 0); 776 - drm_buddy_fini(&mm); 774 + gpu_buddy_free_block(&mm, block); 775 + gpu_buddy_free_list(&mm, &blocks, 0); 776 + gpu_buddy_fini(&mm); 777 777 } 778 778 779 - static void drm_test_buddy_alloc_optimistic(struct kunit *test) 779 + static void gpu_test_buddy_alloc_optimistic(struct kunit *test) 780 780 { 781 781 u64 mm_size, size, start = 0; 782 - struct drm_buddy_block *block; 782 + struct gpu_buddy_block *block; 783 783 unsigned long flags = 0; 784 784 const int max_order = 16; 785 - struct drm_buddy mm; 785 + struct gpu_buddy mm; 786 786 LIST_HEAD(blocks); 787 787 LIST_HEAD(tmp); 788 788 int order; ··· 794 794 795 795 mm_size = SZ_4K * ((1 << (max_order + 1)) - 1); 796 796 797 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K), 797 + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_init(&mm, mm_size, SZ_4K), 798 798 "buddy_init failed\n"); 799 799 800 800 KUNIT_EXPECT_EQ(test, mm.max_order, max_order); 801 801 802 802 for (order = 0; order <= max_order; order++) { 803 803 size = get_size(order, mm.chunk_size); 804 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, 804 + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, mm_size, 805 805 size, size, &tmp, flags), 806 806 "buddy_alloc hit -ENOMEM with order=%d\n", 
807 807 order); 808 808 809 - block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link); 809 + block = list_first_entry_or_null(&tmp, struct gpu_buddy_block, link); 810 810 KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n"); 811 811 812 812 list_move_tail(&block->link, &blocks); ··· 814 814 815 815 /* Should be completely full! */ 816 816 size = get_size(0, mm.chunk_size); 817 - KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, 817 + KUNIT_ASSERT_TRUE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, mm_size, 818 818 size, size, &tmp, flags), 819 819 "buddy_alloc unexpectedly succeeded, it should be full!"); 820 820 821 - drm_buddy_free_list(&mm, &blocks, 0); 822 - drm_buddy_fini(&mm); 821 + gpu_buddy_free_list(&mm, &blocks, 0); 822 + gpu_buddy_fini(&mm); 823 823 } 824 824 825 - static void drm_test_buddy_alloc_limit(struct kunit *test) 825 + static void gpu_test_buddy_alloc_limit(struct kunit *test) 826 826 { 827 827 u64 size = U64_MAX, start = 0; 828 - struct drm_buddy_block *block; 828 + struct gpu_buddy_block *block; 829 829 unsigned long flags = 0; 830 830 LIST_HEAD(allocated); 831 - struct drm_buddy mm; 831 + struct gpu_buddy mm; 832 832 833 - KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, SZ_4K)); 833 + KUNIT_EXPECT_FALSE(test, gpu_buddy_init(&mm, size, SZ_4K)); 834 834 835 - KUNIT_EXPECT_EQ_MSG(test, mm.max_order, DRM_BUDDY_MAX_ORDER, 835 + KUNIT_EXPECT_EQ_MSG(test, mm.max_order, GPU_BUDDY_MAX_ORDER, 836 836 "mm.max_order(%d) != %d\n", mm.max_order, 837 - DRM_BUDDY_MAX_ORDER); 837 + GPU_BUDDY_MAX_ORDER); 838 838 839 839 size = mm.chunk_size << mm.max_order; 840 - KUNIT_EXPECT_FALSE(test, drm_buddy_alloc_blocks(&mm, start, size, size, 840 + KUNIT_EXPECT_FALSE(test, gpu_buddy_alloc_blocks(&mm, start, size, size, 841 841 mm.chunk_size, &allocated, flags)); 842 842 843 - block = list_first_entry_or_null(&allocated, struct drm_buddy_block, link); 843 + block = list_first_entry_or_null(&allocated, struct gpu_buddy_block, link); 844 844 KUNIT_EXPECT_TRUE(test, block); 845 845 846 - KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_order(block), mm.max_order, 846 + KUNIT_EXPECT_EQ_MSG(test, gpu_buddy_block_order(block), mm.max_order, 847 847 "block order(%d) != %d\n", 848 - drm_buddy_block_order(block), mm.max_order); 848 + gpu_buddy_block_order(block), mm.max_order); 849 849 850 - KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_size(&mm, block), 850 + KUNIT_EXPECT_EQ_MSG(test, gpu_buddy_block_size(&mm, block), 851 851 BIT_ULL(mm.max_order) * mm.chunk_size, 852 852 "block size(%llu) != %llu\n", 853 - drm_buddy_block_size(&mm, block), 853 + gpu_buddy_block_size(&mm, block), 854 854 BIT_ULL(mm.max_order) * mm.chunk_size); 855 855 856 - drm_buddy_free_list(&mm, &allocated, 0); 857 - drm_buddy_fini(&mm); 856 + gpu_buddy_free_list(&mm, &allocated, 0); 857 + gpu_buddy_fini(&mm); 858 858 } 859 859 860 - static void drm_test_buddy_alloc_exceeds_max_order(struct kunit *test) 860 + static void gpu_test_buddy_alloc_exceeds_max_order(struct kunit *test) 861 861 { 862 862 u64 mm_size = SZ_8G + SZ_2G, size = SZ_8G + SZ_1G, min_block_size = SZ_8G; 863 - struct drm_buddy mm; 863 + struct gpu_buddy mm; 864 864 LIST_HEAD(blocks); 865 865 int err; 866 866 867 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K), 867 + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_init(&mm, mm_size, SZ_4K), 868 868 "buddy_init failed\n"); 869 869 870 870 /* CONTIGUOUS allocation should succeed via try_harder fallback */ 871 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, 
mm_size, size, 871 + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, size, 872 872 SZ_4K, &blocks, 873 - DRM_BUDDY_CONTIGUOUS_ALLOCATION), 873 + GPU_BUDDY_CONTIGUOUS_ALLOCATION), 874 874 "buddy_alloc hit an error size=%llu\n", size); 875 - drm_buddy_free_list(&mm, &blocks, 0); 875 + gpu_buddy_free_list(&mm, &blocks, 0); 876 876 877 877 /* Non-CONTIGUOUS with large min_block_size should return -EINVAL */ 878 - err = drm_buddy_alloc_blocks(&mm, 0, mm_size, size, min_block_size, &blocks, 0); 878 + err = gpu_buddy_alloc_blocks(&mm, 0, mm_size, size, min_block_size, &blocks, 0); 879 879 KUNIT_EXPECT_EQ(test, err, -EINVAL); 880 880 881 881 /* Non-CONTIGUOUS + RANGE with large min_block_size should return -EINVAL */ 882 - err = drm_buddy_alloc_blocks(&mm, 0, mm_size, size, min_block_size, &blocks, 883 - DRM_BUDDY_RANGE_ALLOCATION); 882 + err = gpu_buddy_alloc_blocks(&mm, 0, mm_size, size, min_block_size, &blocks, 883 + GPU_BUDDY_RANGE_ALLOCATION); 884 884 KUNIT_EXPECT_EQ(test, err, -EINVAL); 885 885 886 886 /* CONTIGUOUS + RANGE should return -EINVAL (no try_harder for RANGE) */ 887 - err = drm_buddy_alloc_blocks(&mm, 0, mm_size, size, SZ_4K, &blocks, 888 - DRM_BUDDY_CONTIGUOUS_ALLOCATION | DRM_BUDDY_RANGE_ALLOCATION); 887 + err = gpu_buddy_alloc_blocks(&mm, 0, mm_size, size, SZ_4K, &blocks, 888 + GPU_BUDDY_CONTIGUOUS_ALLOCATION | GPU_BUDDY_RANGE_ALLOCATION); 889 889 KUNIT_EXPECT_EQ(test, err, -EINVAL); 890 890 891 - drm_buddy_fini(&mm); 891 + gpu_buddy_fini(&mm); 892 892 } 893 893 894 - static int drm_buddy_suite_init(struct kunit_suite *suite) 894 + static int gpu_buddy_suite_init(struct kunit_suite *suite) 895 895 { 896 896 while (!random_seed) 897 897 random_seed = get_random_u32(); 898 898 899 - kunit_info(suite, "Testing DRM buddy manager, with random_seed=0x%x\n", 899 + kunit_info(suite, "Testing GPU buddy manager, with random_seed=0x%x\n", 900 900 random_seed); 901 901 902 902 return 0; 903 903 } 904 904 905 - static struct kunit_case drm_buddy_tests[] = { 906 - KUNIT_CASE(drm_test_buddy_alloc_limit), 907 - KUNIT_CASE(drm_test_buddy_alloc_optimistic), 908 - KUNIT_CASE(drm_test_buddy_alloc_pessimistic), 909 - KUNIT_CASE(drm_test_buddy_alloc_pathological), 910 - KUNIT_CASE(drm_test_buddy_alloc_contiguous), 911 - KUNIT_CASE(drm_test_buddy_alloc_clear), 912 - KUNIT_CASE(drm_test_buddy_alloc_range_bias), 913 - KUNIT_CASE(drm_test_buddy_fragmentation_performance), 914 - KUNIT_CASE(drm_test_buddy_alloc_exceeds_max_order), 905 + static struct kunit_case gpu_buddy_tests[] = { 906 + KUNIT_CASE(gpu_test_buddy_alloc_limit), 907 + KUNIT_CASE(gpu_test_buddy_alloc_optimistic), 908 + KUNIT_CASE(gpu_test_buddy_alloc_pessimistic), 909 + KUNIT_CASE(gpu_test_buddy_alloc_pathological), 910 + KUNIT_CASE(gpu_test_buddy_alloc_contiguous), 911 + KUNIT_CASE(gpu_test_buddy_alloc_clear), 912 + KUNIT_CASE(gpu_test_buddy_alloc_range_bias), 913 + KUNIT_CASE(gpu_test_buddy_fragmentation_performance), 914 + KUNIT_CASE(gpu_test_buddy_alloc_exceeds_max_order), 915 915 {} 916 916 }; 917 917 918 - static struct kunit_suite drm_buddy_test_suite = { 919 - .name = "drm_buddy", 920 - .suite_init = drm_buddy_suite_init, 921 - .test_cases = drm_buddy_tests, 918 + static struct kunit_suite gpu_buddy_test_suite = { 919 + .name = "gpu_buddy", 920 + .suite_init = gpu_buddy_suite_init, 921 + .test_cases = gpu_buddy_tests, 922 922 }; 923 923 924 - kunit_test_suite(drm_buddy_test_suite); 924 + kunit_test_suite(gpu_buddy_test_suite); 925 925 926 926 MODULE_AUTHOR("Intel Corporation"); 927 - MODULE_DESCRIPTION("Kunit 
test for drm_buddy functions"); 927 + MODULE_DESCRIPTION("KUnit test for gpu_buddy functions"); 928 928 MODULE_LICENSE("GPL");
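For driver authors picking up the renamed API, a minimal KUnit case might look like the sketch below. It only uses entry points visible in this patch (gpu_buddy_init, gpu_buddy_alloc_blocks, gpu_buddy_free_list, gpu_buddy_fini); the test name and the SZ_1M/SZ_64K sizes are illustrative, not part of the patch.

/* Minimal driver-side smoke test against the renamed allocator. */
#include <kunit/test.h>
#include <linux/gpu_buddy.h>
#include <linux/sizes.h>

static void my_driver_buddy_smoke_test(struct kunit *test)
{
	struct gpu_buddy mm;
	LIST_HEAD(blocks);

	/* One SZ_1M pool with SZ_4K minimum chunk size. */
	KUNIT_ASSERT_FALSE(test, gpu_buddy_init(&mm, SZ_1M, SZ_4K));

	/* Ask for 64K anywhere in the pool, default (dirty) allocation. */
	KUNIT_EXPECT_FALSE(test, gpu_buddy_alloc_blocks(&mm, 0, SZ_1M, SZ_64K,
							SZ_4K, &blocks, 0));

	gpu_buddy_free_list(&mm, &blocks, 0);
	gpu_buddy_fini(&mm);
}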
+8 -8
drivers/gpu/tests/gpu_random.c
··· 8 8 9 9 #include "gpu_random.h" 10 10 11 - u32 drm_prandom_u32_max_state(u32 ep_ro, struct rnd_state *state) 11 + u32 gpu_prandom_u32_max_state(u32 ep_ro, struct rnd_state *state) 12 12 { 13 13 return upper_32_bits((u64)prandom_u32_state(state) * ep_ro); 14 14 } 15 - EXPORT_SYMBOL(drm_prandom_u32_max_state); 15 + EXPORT_SYMBOL(gpu_prandom_u32_max_state); 16 16 17 - void drm_random_reorder(unsigned int *order, unsigned int count, 17 + void gpu_random_reorder(unsigned int *order, unsigned int count, 18 18 struct rnd_state *state) 19 19 { 20 20 unsigned int i, j; 21 21 22 22 for (i = 0; i < count; ++i) { 23 23 BUILD_BUG_ON(sizeof(unsigned int) > sizeof(u32)); 24 - j = drm_prandom_u32_max_state(count, state); 24 + j = gpu_prandom_u32_max_state(count, state); 25 25 swap(order[i], order[j]); 26 26 } 27 27 } 28 - EXPORT_SYMBOL(drm_random_reorder); 28 + EXPORT_SYMBOL(gpu_random_reorder); 29 29 30 - unsigned int *drm_random_order(unsigned int count, struct rnd_state *state) 30 + unsigned int *gpu_random_order(unsigned int count, struct rnd_state *state) 31 31 { 32 32 unsigned int *order, i; 33 33 ··· 38 38 for (i = 0; i < count; i++) 39 39 order[i] = i; 40 40 41 - drm_random_reorder(order, count, state); 41 + gpu_random_reorder(order, count, state); 42 42 return order; 43 43 } 44 - EXPORT_SYMBOL(drm_random_order); 44 + EXPORT_SYMBOL(gpu_random_order);
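The scaling in gpu_prandom_u32_max_state deserves a note: instead of a modulo, it takes the upper 32 bits of a 64-bit product, which maps a full-range 32-bit value x into [0, ep_ro) with a single multiply. A standalone sketch of the same arithmetic (values in the comment are illustrative):

#include <linux/types.h>

/* upper_32_bits((u64)x * ep_ro): the high half of the product is the
 * scaled result, so the output always lands in [0, ep_ro). */
static u32 scale_u32_example(u32 x, u32 ep_ro)
{
	return (u32)(((u64)x * ep_ro) >> 32);
}
/* e.g. x = 0xffffffff, ep_ro = 8 -> 7; x = 0, ep_ro = 8 -> 0 */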
+9 -9
drivers/gpu/tests/gpu_random.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 */ 2 - #ifndef __DRM_RANDOM_H__ 3 - #define __DRM_RANDOM_H__ 2 + #ifndef __GPU_RANDOM_H__ 3 + #define __GPU_RANDOM_H__ 4 4 5 5 /* This is a temporary home for a couple of utility functions that should 6 6 * be transposed to lib/ at the earliest convenience. ··· 8 8 9 9 #include <linux/prandom.h> 10 10 11 - #define DRM_RND_STATE_INITIALIZER(seed__) ({ \ 11 + #define GPU_RND_STATE_INITIALIZER(seed__) ({ \ 12 12 struct rnd_state state__; \ 13 13 prandom_seed_state(&state__, (seed__)); \ 14 14 state__; \ 15 15 }) 16 16 17 - #define DRM_RND_STATE(name__, seed__) \ 18 - struct rnd_state name__ = DRM_RND_STATE_INITIALIZER(seed__) 17 + #define GPU_RND_STATE(name__, seed__) \ 18 + struct rnd_state name__ = GPU_RND_STATE_INITIALIZER(seed__) 19 19 20 - unsigned int *drm_random_order(unsigned int count, 20 + unsigned int *gpu_random_order(unsigned int count, 21 21 struct rnd_state *state); 22 - void drm_random_reorder(unsigned int *order, 22 + void gpu_random_reorder(unsigned int *order, 23 23 unsigned int count, 24 24 struct rnd_state *state); 25 - u32 drm_prandom_u32_max_state(u32 ep_ro, 25 + u32 gpu_prandom_u32_max_state(u32 ep_ro, 26 26 struct rnd_state *state); 27 27 28 - #endif /* !__DRM_RANDOM_H__ */ 28 + #endif /* !__GPU_RANDOM_H__ */
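Taken together, the renamed helpers are meant to be consumed roughly as below. This is a sketch, not code from the patch: the seed, the count of 8, and the assumption that gpu_random_order() returns a kmalloc'd array (freed with kfree) on success and NULL on allocation failure are inferred from the helpers above.

#include <linux/slab.h>
#include "gpu_random.h"

static void shuffle_example(void)
{
	GPU_RND_STATE(prng, 0x12345678); /* seeded struct rnd_state on the stack */
	unsigned int *order;

	order = gpu_random_order(8, &prng); /* random permutation of 0..7 */
	if (!order)
		return;
	/* ... visit blocks in order[0..7] to exercise randomized paths ... */
	kfree(order);
}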
+1
drivers/video/Kconfig
··· 37 37 38 38 source "drivers/gpu/vga/Kconfig" 39 39 40 + source "drivers/gpu/Kconfig" 40 41 source "drivers/gpu/host1x/Kconfig" 41 42 source "drivers/gpu/ipu-v3/Kconfig" 42 43 source "drivers/gpu/nova-core/Kconfig"
+18
include/drm/drm_buddy.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2021 Intel Corporation 4 + */ 5 + 6 + #ifndef __DRM_BUDDY_H__ 7 + #define __DRM_BUDDY_H__ 8 + 9 + #include <linux/gpu_buddy.h> 10 + 11 + struct drm_printer; 12 + 13 + /* DRM-specific GPU Buddy Allocator print helpers */ 14 + void drm_buddy_print(struct gpu_buddy *mm, struct drm_printer *p); 15 + void drm_buddy_block_print(struct gpu_buddy *mm, 16 + struct gpu_buddy_block *block, 17 + struct drm_printer *p); 18 + #endif
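The shim keeps DRM logging on the DRM side while the allocator state lives in gpu_buddy. A sketch of how a DRM driver might call it, assuming the usual drm_print.h helpers (drm_info_printer() here) and a device pointer the driver already holds:

#include <drm/drm_buddy.h>
#include <drm/drm_print.h>

static void dump_vram_state(struct device *dev, struct gpu_buddy *mm)
{
	struct drm_printer p = drm_info_printer(dev);

	/* DRM-specific printer over the GPU-generic allocator state. */
	drm_buddy_print(mm, &p);
}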
+63 -57
include/linux/gpu_buddy.h
··· 3 3 * Copyright © 2021 Intel Corporation 4 4 */ 5 5 6 - #ifndef __DRM_BUDDY_H__ 7 - #define __DRM_BUDDY_H__ 6 + #ifndef __GPU_BUDDY_H__ 7 + #define __GPU_BUDDY_H__ 8 8 9 9 #include <linux/bitops.h> 10 10 #include <linux/list.h> ··· 12 12 #include <linux/sched.h> 13 13 #include <linux/rbtree.h> 14 14 15 - struct drm_printer; 15 + #define GPU_BUDDY_RANGE_ALLOCATION BIT(0) 16 + #define GPU_BUDDY_TOPDOWN_ALLOCATION BIT(1) 17 + #define GPU_BUDDY_CONTIGUOUS_ALLOCATION BIT(2) 18 + #define GPU_BUDDY_CLEAR_ALLOCATION BIT(3) 19 + #define GPU_BUDDY_CLEARED BIT(4) 20 + #define GPU_BUDDY_TRIM_DISABLE BIT(5) 16 21 17 - #define DRM_BUDDY_RANGE_ALLOCATION BIT(0) 18 - #define DRM_BUDDY_TOPDOWN_ALLOCATION BIT(1) 19 - #define DRM_BUDDY_CONTIGUOUS_ALLOCATION BIT(2) 20 - #define DRM_BUDDY_CLEAR_ALLOCATION BIT(3) 21 - #define DRM_BUDDY_CLEARED BIT(4) 22 - #define DRM_BUDDY_TRIM_DISABLE BIT(5) 22 + enum gpu_buddy_free_tree { 23 + GPU_BUDDY_CLEAR_TREE = 0, 24 + GPU_BUDDY_DIRTY_TREE, 25 + GPU_BUDDY_MAX_FREE_TREES, 26 + }; 23 27 24 - struct drm_buddy_block { 25 - #define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12) 26 - #define DRM_BUDDY_HEADER_STATE GENMASK_ULL(11, 10) 27 - #define DRM_BUDDY_ALLOCATED (1 << 10) 28 - #define DRM_BUDDY_FREE (2 << 10) 29 - #define DRM_BUDDY_SPLIT (3 << 10) 30 - #define DRM_BUDDY_HEADER_CLEAR GENMASK_ULL(9, 9) 28 + #define for_each_free_tree(tree) \ 29 + for ((tree) = 0; (tree) < GPU_BUDDY_MAX_FREE_TREES; (tree)++) 30 + 31 + struct gpu_buddy_block { 32 + #define GPU_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12) 33 + #define GPU_BUDDY_HEADER_STATE GENMASK_ULL(11, 10) 34 + #define GPU_BUDDY_ALLOCATED (1 << 10) 35 + #define GPU_BUDDY_FREE (2 << 10) 36 + #define GPU_BUDDY_SPLIT (3 << 10) 37 + #define GPU_BUDDY_HEADER_CLEAR GENMASK_ULL(9, 9) 31 38 /* Free to be used, if needed in the future */ 32 - #define DRM_BUDDY_HEADER_UNUSED GENMASK_ULL(8, 6) 33 - #define DRM_BUDDY_HEADER_ORDER GENMASK_ULL(5, 0) 39 + #define GPU_BUDDY_HEADER_UNUSED GENMASK_ULL(8, 6) 40 + #define GPU_BUDDY_HEADER_ORDER GENMASK_ULL(5, 0) 34 41 u64 header; 35 42 36 - struct drm_buddy_block *left; 37 - struct drm_buddy_block *right; 38 - struct drm_buddy_block *parent; 43 + struct gpu_buddy_block *left; 44 + struct gpu_buddy_block *right; 45 + struct gpu_buddy_block *parent; 39 46 40 47 void *private; /* owned by creator */ 41 48 42 49 /* 43 - * While the block is allocated by the user through drm_buddy_alloc*, 50 + * While the block is allocated by the user through gpu_buddy_alloc*, 44 51 * the user has ownership of the link, for example to maintain within 45 52 * a list, if so desired. As soon as the block is freed with 46 - * drm_buddy_free* ownership is given back to the mm. 53 + * gpu_buddy_free* ownership is given back to the mm. 47 54 */ 48 55 union { 49 56 struct rb_node rb; ··· 61 54 }; 62 55 63 56 /* Order-zero must be at least SZ_4K */ 64 - #define DRM_BUDDY_MAX_ORDER (63 - 12) 57 + #define GPU_BUDDY_MAX_ORDER (63 - 12) 65 58 66 59 /* 67 60 * Binary Buddy System. 68 61 * 69 62 * Locking should be handled by the user, a simple mutex around 70 - * drm_buddy_alloc* and drm_buddy_free* should suffice. 63 + * gpu_buddy_alloc* and gpu_buddy_free* should suffice. 71 64 */ 72 - struct drm_buddy { 65 + struct gpu_buddy { 73 66 /* Maintain a free list for each order. */ 74 67 struct rb_root **free_trees; 75 68 ··· 80 73 * block. Nodes are either allocated or free, in which case they will 81 74 * also exist on the respective free list. 
82 75 */ 83 - struct drm_buddy_block **roots; 76 + struct gpu_buddy_block **roots; 84 77 85 78 /* 86 79 * Anything from here is public, and remains static for the lifetime of ··· 97 90 }; 98 91 99 92 static inline u64 100 - drm_buddy_block_offset(const struct drm_buddy_block *block) 93 + gpu_buddy_block_offset(const struct gpu_buddy_block *block) 101 94 { 102 - return block->header & DRM_BUDDY_HEADER_OFFSET; 95 + return block->header & GPU_BUDDY_HEADER_OFFSET; 103 96 } 104 97 105 98 static inline unsigned int 106 - drm_buddy_block_order(struct drm_buddy_block *block) 99 + gpu_buddy_block_order(struct gpu_buddy_block *block) 107 100 { 108 - return block->header & DRM_BUDDY_HEADER_ORDER; 101 + return block->header & GPU_BUDDY_HEADER_ORDER; 109 102 } 110 103 111 104 static inline unsigned int 112 - drm_buddy_block_state(struct drm_buddy_block *block) 105 + gpu_buddy_block_state(struct gpu_buddy_block *block) 113 106 { 114 - return block->header & DRM_BUDDY_HEADER_STATE; 107 + return block->header & GPU_BUDDY_HEADER_STATE; 115 108 } 116 109 117 110 static inline bool 118 - drm_buddy_block_is_allocated(struct drm_buddy_block *block) 111 + gpu_buddy_block_is_allocated(struct gpu_buddy_block *block) 119 112 { 120 - return drm_buddy_block_state(block) == DRM_BUDDY_ALLOCATED; 113 + return gpu_buddy_block_state(block) == GPU_BUDDY_ALLOCATED; 121 114 } 122 115 123 116 static inline bool 124 - drm_buddy_block_is_clear(struct drm_buddy_block *block) 117 + gpu_buddy_block_is_clear(struct gpu_buddy_block *block) 125 118 { 126 - return block->header & DRM_BUDDY_HEADER_CLEAR; 119 + return block->header & GPU_BUDDY_HEADER_CLEAR; 127 120 } 128 121 129 122 static inline bool 130 - drm_buddy_block_is_free(struct drm_buddy_block *block) 123 + gpu_buddy_block_is_free(struct gpu_buddy_block *block) 131 124 { 132 - return drm_buddy_block_state(block) == DRM_BUDDY_FREE; 125 + return gpu_buddy_block_state(block) == GPU_BUDDY_FREE; 133 126 } 134 127 135 128 static inline bool 136 - drm_buddy_block_is_split(struct drm_buddy_block *block) 129 + gpu_buddy_block_is_split(struct gpu_buddy_block *block) 137 130 { 138 - return drm_buddy_block_state(block) == DRM_BUDDY_SPLIT; 131 + return gpu_buddy_block_state(block) == GPU_BUDDY_SPLIT; 139 132 } 140 133 141 134 static inline u64 142 - drm_buddy_block_size(struct drm_buddy *mm, 143 - struct drm_buddy_block *block) 135 + gpu_buddy_block_size(struct gpu_buddy *mm, 136 + struct gpu_buddy_block *block) 144 137 { 145 - return mm->chunk_size << drm_buddy_block_order(block); 138 + return mm->chunk_size << gpu_buddy_block_order(block); 146 139 } 147 140 148 - int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size); 141 + int gpu_buddy_init(struct gpu_buddy *mm, u64 size, u64 chunk_size); 149 142 150 - void drm_buddy_fini(struct drm_buddy *mm); 143 + void gpu_buddy_fini(struct gpu_buddy *mm); 151 144 152 - struct drm_buddy_block * 153 - drm_get_buddy(struct drm_buddy_block *block); 145 + struct gpu_buddy_block * 146 + gpu_get_buddy(struct gpu_buddy_block *block); 154 147 155 - int drm_buddy_alloc_blocks(struct drm_buddy *mm, 148 + int gpu_buddy_alloc_blocks(struct gpu_buddy *mm, 156 149 u64 start, u64 end, u64 size, 157 150 u64 min_page_size, 158 151 struct list_head *blocks, 159 152 unsigned long flags); 160 153 161 - int drm_buddy_block_trim(struct drm_buddy *mm, 154 + int gpu_buddy_block_trim(struct gpu_buddy *mm, 162 155 u64 *start, 163 156 u64 new_size, 164 157 struct list_head *blocks); 165 158 166 - void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear); 
159 + void gpu_buddy_reset_clear(struct gpu_buddy *mm, bool is_clear); 167 160 168 - void drm_buddy_free_block(struct drm_buddy *mm, struct drm_buddy_block *block); 161 + void gpu_buddy_free_block(struct gpu_buddy *mm, struct gpu_buddy_block *block); 169 162 170 - void drm_buddy_free_list(struct drm_buddy *mm, 163 + void gpu_buddy_free_list(struct gpu_buddy *mm, 171 164 struct list_head *objects, 172 165 unsigned int flags); 173 166 174 - void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p); 175 - void drm_buddy_block_print(struct drm_buddy *mm, 176 - struct drm_buddy_block *block, 177 - struct drm_printer *p); 167 + void gpu_buddy_print(struct gpu_buddy *mm); 168 + void gpu_buddy_block_print(struct gpu_buddy *mm, 169 + struct gpu_buddy_block *block); 178 170 #endif
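Finally, a minimal end-to-end sketch of the renamed interface, following the locking comment above (a single mutex around alloc and free). The pool size, the 1M request, and handing GPU_BUDDY_CLEARED back on free are illustrative choices, not mandated by the patch:

#include <linux/gpu_buddy.h>
#include <linux/mutex.h>
#include <linux/sizes.h>

static DEFINE_MUTEX(pool_lock);

static int pool_demo(void)
{
	struct gpu_buddy mm;
	LIST_HEAD(blocks);
	int err;

	err = gpu_buddy_init(&mm, SZ_64M, SZ_4K); /* 64M pool, 4K chunks */
	if (err)
		return err;

	mutex_lock(&pool_lock);
	/* 1M of zeroed memory, placed top-down within the pool. */
	err = gpu_buddy_alloc_blocks(&mm, 0, SZ_64M, SZ_1M, SZ_4K, &blocks,
				     GPU_BUDDY_TOPDOWN_ALLOCATION |
				     GPU_BUDDY_CLEAR_ALLOCATION);
	if (!err)
		/* Blocks were never dirtied here, so hand them back as clear. */
		gpu_buddy_free_list(&mm, &blocks, GPU_BUDDY_CLEARED);
	mutex_unlock(&pool_lock);

	gpu_buddy_fini(&mm);
	return err;
}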