Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'amd-drm-next-7.1-2026-04-01' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-7.1-2026-04-01:

amdgpu:
- UserQ fixes
- PASID handling fix
- S4 fix for smu11 chips
- devcoredump fixes
- RAS fixes
- Misc small fixes
- DCN 4.2 updates
- DVI fixes
- DML fixes
- DC pipe validation fixes
- eDP DSC seamless boot
- DC FP rework
- swsmu cleanups
- GC 11.5.4 updates
- Add DC idle state manager
- Add support for using multiple engines for buffer fills and clears
- Misc SMU7 fixes

amdkfd:
- Non-4K page fixes
- Logging cleanups
- sysfs fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patch.msgid.link/20260401184456.3576660-1-alexander.deucher@amd.com

+4153 -1892
+248 -217
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
··· 373 373 return -ENODEV; 374 374 } 375 375 376 - int 377 - amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev, 376 + int amdgpu_atomfirmware_get_integrated_system_info(struct amdgpu_device *adev, 377 + int *vram_width, int *vram_type, 378 + int *vram_vendor) 379 + { 380 + struct amdgpu_mode_info *mode_info = &adev->mode_info; 381 + int index; 382 + u16 data_offset, size; 383 + union igp_info *igp_info; 384 + u8 frev, crev; 385 + u8 mem_type; 386 + u32 mem_channel_number; 387 + u32 mem_channel_width; 388 + 389 + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 390 + integratedsysteminfo); 391 + if (amdgpu_atom_parse_data_header(mode_info->atom_context, 392 + index, &size, 393 + &frev, &crev, &data_offset)) { 394 + igp_info = (union igp_info *) 395 + (mode_info->atom_context->bios + data_offset); 396 + switch (frev) { 397 + case 1: 398 + switch (crev) { 399 + case 11: 400 + case 12: 401 + mem_channel_number = igp_info->v11.umachannelnumber; 402 + if (!mem_channel_number) 403 + mem_channel_number = 1; 404 + mem_type = igp_info->v11.memorytype; 405 + if (mem_type == LpDdr5MemType) 406 + mem_channel_width = 32; 407 + else 408 + mem_channel_width = 64; 409 + if (vram_width) 410 + *vram_width = mem_channel_number * mem_channel_width; 411 + if (vram_type) 412 + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 413 + break; 414 + default: 415 + return -EINVAL; 416 + } 417 + break; 418 + case 2: 419 + switch (crev) { 420 + case 1: 421 + case 2: 422 + mem_channel_number = igp_info->v21.umachannelnumber; 423 + if (!mem_channel_number) 424 + mem_channel_number = 1; 425 + mem_type = igp_info->v21.memorytype; 426 + if (mem_type == LpDdr5MemType) 427 + mem_channel_width = 32; 428 + else 429 + mem_channel_width = 64; 430 + if (vram_width) 431 + *vram_width = mem_channel_number * mem_channel_width; 432 + if (vram_type) 433 + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 434 + break; 435 + case 3: 436 + mem_channel_number = igp_info->v23.umachannelnumber; 437 + if (!mem_channel_number) 438 + mem_channel_number = 1; 439 + mem_type = igp_info->v23.memorytype; 440 + if (mem_type == LpDdr5MemType) 441 + mem_channel_width = 32; 442 + else 443 + mem_channel_width = 64; 444 + if (vram_width) 445 + *vram_width = mem_channel_number * mem_channel_width; 446 + if (vram_type) 447 + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 448 + break; 449 + default: 450 + return -EINVAL; 451 + } 452 + break; 453 + default: 454 + return -EINVAL; 455 + } 456 + } else { 457 + return -EINVAL; 458 + } 459 + return 0; 460 + } 461 +
462 + int amdgpu_atomfirmware_get_umc_info(struct amdgpu_device *adev, 463 + int *vram_width, int *vram_type, 464 + int *vram_vendor) 465 + { 466 + struct amdgpu_mode_info *mode_info = &adev->mode_info; 467 + int index; 468 + u16 data_offset, size; 469 + union umc_info *umc_info; 470 + u8 frev, crev; 471 + u8 mem_type; 472 + u8 mem_vendor; 473 + u32 mem_channel_number; 474 + u32 mem_channel_width; 475 + 476 + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, umc_info); 477 + 478 + if (amdgpu_atom_parse_data_header(mode_info->atom_context, 479 + index, &size, 480 + &frev, &crev, &data_offset)) { 481 + umc_info = (union umc_info *)(mode_info->atom_context->bios + data_offset); 482 + 483 + if (frev == 4) { 484 + switch (crev) { 485 + case 0: 486 + mem_channel_number = le32_to_cpu(umc_info->v40.channel_num); 487 + mem_type = le32_to_cpu(umc_info->v40.vram_type); 488 + mem_channel_width = le32_to_cpu(umc_info->v40.channel_width); 489 + mem_vendor = RREG32(adev->bios_scratch_reg_offset + 4) & 0xF; 490 + if (vram_vendor) 491 + *vram_vendor = mem_vendor; 492 + if (vram_type) 493 + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 494 + if (vram_width) 495 + *vram_width = mem_channel_number * (1 << mem_channel_width); 496 + break; 497 + default: 498 + return -EINVAL; 499 + } 500 + } else { 501 + return -EINVAL; 502 + } 503 + } else { 504 + return -EINVAL; 505 + } 506 + 507 + return 0; 508 + } 509 +
510 + int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev, 378 511 int *vram_width, int *vram_type, 379 512 int *vram_vendor) 380 513 { 381 514 struct amdgpu_mode_info *mode_info = &adev->mode_info; 382 515 int index, i = 0; 383 516 u16 data_offset, size; 384 - union igp_info *igp_info; 385 517 union vram_info *vram_info; 386 - union umc_info *umc_info; 387 518 union vram_module *vram_module; 388 519 u8 frev, crev; 389 520 u8 mem_type;
··· 523 392 u32 mem_channel_width; 524 393 u32 module_id; 525 394 526 - if (adev->flags & AMD_IS_APU) 527 - index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 528 - integratedsysteminfo); 529 - else { 530 - switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 531 - case IP_VERSION(12, 0, 0): 532 - case IP_VERSION(12, 0, 1): 533 - index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, umc_info); 534 - break; 535 - default: 536 - index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, vram_info); 537 - } 538 - } 395 + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, vram_info); 396 + 539 397 if (amdgpu_atom_parse_data_header(mode_info->atom_context, 540 398 index, &size, 541 399 &frev, &crev, &data_offset)) { 542 - if (adev->flags & AMD_IS_APU) { 543 - igp_info = (union igp_info *) 544 - (mode_info->atom_context->bios + data_offset); 545 - switch (frev) { 546 - case 1: 547 - switch (crev) { 548 - case 11: 549 - case 12: 550 - mem_channel_number = igp_info->v11.umachannelnumber; 551 - if (!mem_channel_number) 552 - mem_channel_number = 1; 553 - mem_type = igp_info->v11.memorytype; 554 - if (mem_type == LpDdr5MemType) 555 - mem_channel_width = 32; 556 - else 557 - mem_channel_width = 64; 558 - if (vram_width) 559 - *vram_width = mem_channel_number * mem_channel_width; 560 - if (vram_type) 561 - *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 562 - break; 563 - default: 564 - return -EINVAL; 565 - } 400 + vram_info = (union vram_info *) 401 + (mode_info->atom_context->bios + data_offset); 402 + 403 + module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16; 404 + if (frev == 3) { 405 + switch (crev) { 406 + /* v30 */ 407 + case 0: 408 + vram_module = (union vram_module *)vram_info->v30.vram_module; 409 + mem_vendor = (vram_module->v30.dram_vendor_id) & 0xF; 410 + if (vram_vendor) 411 + *vram_vendor = mem_vendor; 412 + mem_type = vram_info->v30.memory_type; 413 + if (vram_type) 414 + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 415 + mem_channel_number = vram_info->v30.channel_num; 416 + mem_channel_width = vram_info->v30.channel_width; 417 + if (vram_width) 418 + *vram_width = mem_channel_number * 16; 566 419 break; 567 - case 2: 568 - switch (crev) { 569 - case 1: 570 - case 2: 571 - mem_channel_number = igp_info->v21.umachannelnumber; 572 - if (!mem_channel_number) 573 - mem_channel_number = 1; 574 - mem_type = igp_info->v21.memorytype; 575 - if (mem_type == LpDdr5MemType) 576 - mem_channel_width = 32; 577 - else 578 - mem_channel_width = 64; 579 - if (vram_width) 580 - *vram_width = mem_channel_number * mem_channel_width; 581 - if (vram_type) 582 - *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 583 - break; 584 - case 3: 585 - mem_channel_number = igp_info->v23.umachannelnumber; 586 - if (!mem_channel_number) 587 - mem_channel_number = 1; 588 - mem_type = igp_info->v23.memorytype; 589 - if (mem_type == LpDdr5MemType) 590 - mem_channel_width = 32; 591 - else 592 - mem_channel_width = 64; 593 - if (vram_width) 594 - *vram_width = mem_channel_number * mem_channel_width; 595 - if (vram_type) 596 - *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 597 - break; 598 - default: 599 - return -EINVAL; 420 + default: 421 + return -EINVAL; 422 + } 423 + } else if (frev == 2) { 424 + switch (crev) { 425 + /* v23 */ 426 + case 3: 427 + if (module_id > vram_info->v23.vram_module_num) 428 + module_id = 0; 429 + vram_module = (union vram_module *)vram_info->v23.vram_module; 430 + while (i < module_id) { 431 + vram_module = (union vram_module *) 432 + ((u8 *)vram_module + vram_module->v9.vram_module_size); 433 + i++; 600 434 } 435 + mem_type = vram_module->v9.memory_type; 436 + if (vram_type) 437 + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 438 + mem_channel_number = vram_module->v9.channel_num; 439 + mem_channel_width = vram_module->v9.channel_width; 440 + if (vram_width) 441 + *vram_width = mem_channel_number * (1 << mem_channel_width); 442 + mem_vendor = (vram_module->v9.vender_rev_id) & 0xF; 443 + if (vram_vendor) 444 + *vram_vendor = mem_vendor; 445 + break; 446 + /* v24 */ 447 + case 4: 448 + if (module_id > vram_info->v24.vram_module_num) 449 + module_id = 0; 450 + vram_module = (union vram_module *)vram_info->v24.vram_module; 451 + while (i < module_id) { 452 + vram_module = (union vram_module *) 453 + ((u8 *)vram_module + vram_module->v10.vram_module_size); 454 + i++; 455 + } 456 + mem_type = vram_module->v10.memory_type; 457 + if (vram_type) 458 + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 459 + mem_channel_number = vram_module->v10.channel_num; 460 + mem_channel_width = vram_module->v10.channel_width; 461 + if (vram_width) 462 + *vram_width = mem_channel_number * (1 << mem_channel_width); 463 + mem_vendor = (vram_module->v10.vender_rev_id) & 0xF; 464 + if (vram_vendor) 465 + *vram_vendor = mem_vendor; 466 + break; 467 + /* v25 */ 468 + case 5: 469 + if (module_id > vram_info->v25.vram_module_num) 470 + module_id = 0; 471 + vram_module = (union vram_module *)vram_info->v25.vram_module; 472 + while (i < module_id) { 473 + vram_module = (union vram_module *) 474 + ((u8 *)vram_module + vram_module->v11.vram_module_size); 475 + i++; 476 + } 477 + mem_type = vram_module->v11.memory_type; 478 + if (vram_type) 479 + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 480 + mem_channel_number = vram_module->v11.channel_num; 481 + mem_channel_width = vram_module->v11.channel_width; 482 + if (vram_width) 483 + *vram_width = mem_channel_number * (1 << mem_channel_width); 484 + mem_vendor = (vram_module->v11.vender_rev_id) & 0xF; 485 + if (vram_vendor) 486 + *vram_vendor = mem_vendor; 487 + break; 488 + /* v26 */ 489 + case 6: 490 + if (module_id > vram_info->v26.vram_module_num) 491 + module_id = 0; 492 + vram_module = (union vram_module *)vram_info->v26.vram_module; 493 + while (i < module_id) { 494 + vram_module = (union vram_module *) 495 + ((u8 *)vram_module + vram_module->v9.vram_module_size); 496 + i++; 497 + } 498 + mem_type = vram_module->v9.memory_type; 499 + if (vram_type) 500 + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 501 + mem_channel_number = vram_module->v9.channel_num; 502 + mem_channel_width = vram_module->v9.channel_width; 503 + if (vram_width) 504 + *vram_width = mem_channel_number * (1 << mem_channel_width); 505 + mem_vendor = (vram_module->v9.vender_rev_id) & 0xF; 506 + if (vram_vendor) 507 + *vram_vendor = mem_vendor; 601 508 break; 602 509 default: 603 510 return -EINVAL; 604 511 } 605 512 } else { 606 - switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 607 - case IP_VERSION(12, 0, 0): 608 - case IP_VERSION(12, 0, 1): 609 - umc_info = (union umc_info *)(mode_info->atom_context->bios + data_offset); 610 - 611 - if (frev == 4) { 612 - switch (crev) { 613 - case 0: 614 - mem_channel_number = le32_to_cpu(umc_info->v40.channel_num); 615 - mem_type = le32_to_cpu(umc_info->v40.vram_type); 616 - mem_channel_width = le32_to_cpu(umc_info->v40.channel_width); 617 - mem_vendor = RREG32(adev->bios_scratch_reg_offset + 4) & 0xF; 618 - if (vram_vendor) 619 - *vram_vendor = mem_vendor; 620 - if (vram_type) 621 - *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 622 - if (vram_width) 623 - *vram_width = mem_channel_number * (1 << mem_channel_width); 624 - break; 625 - default: 626 - return -EINVAL; 627 - } 628 - } else 629 - return -EINVAL; 630 - break; 631 - default: 632 - vram_info = (union vram_info *) 633 - (mode_info->atom_context->bios + data_offset); 634 - 635 - module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16; 636 - if (frev == 3) { 637 - switch (crev) { 638 - /* v30 */ 639 - case 0: 640 - vram_module = (union vram_module *)vram_info->v30.vram_module; 641 - mem_vendor = (vram_module->v30.dram_vendor_id) & 0xF; 642 - if (vram_vendor) 643 - *vram_vendor = mem_vendor; 644 - mem_type = vram_info->v30.memory_type; 645 - if (vram_type) 646 - *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 647 - mem_channel_number = vram_info->v30.channel_num; 648 - mem_channel_width = vram_info->v30.channel_width; 649 - if (vram_width) 650 - *vram_width = mem_channel_number * 16; 651 - break; 652 - default: 653 - return -EINVAL; 654 - } 655 - } else if (frev == 2) { 656 - switch (crev) { 657 - /* v23 */ 658 - case 3: 659 - if (module_id > vram_info->v23.vram_module_num) 660 - module_id = 0; 661 - vram_module = (union vram_module *)vram_info->v23.vram_module; 662 - while (i < module_id) { 663 - vram_module = (union vram_module *) 664 - ((u8 *)vram_module + vram_module->v9.vram_module_size); 665 - i++; 666 - } 667 - mem_type = vram_module->v9.memory_type; 668 - if (vram_type) 669 - *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 670 - mem_channel_number = vram_module->v9.channel_num; 671 - mem_channel_width = vram_module->v9.channel_width; 672 - if (vram_width) 673 - *vram_width = mem_channel_number * (1 << mem_channel_width); 674 - mem_vendor = (vram_module->v9.vender_rev_id) & 0xF; 675 - if (vram_vendor) 676 - *vram_vendor = mem_vendor; 677 - break; 678 - /* v24 */ 679 - case 4: 680 - if (module_id > vram_info->v24.vram_module_num) 681 - module_id = 0; 682 - vram_module = (union vram_module *)vram_info->v24.vram_module; 683 - while (i < module_id) { 684 - vram_module = (union vram_module *) 685 - ((u8 *)vram_module + vram_module->v10.vram_module_size); 686 - i++; 687 - } 688 - mem_type = vram_module->v10.memory_type; 689 - if (vram_type) 690 - *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 691 - mem_channel_number = vram_module->v10.channel_num; 692 - mem_channel_width = vram_module->v10.channel_width; 693 - if (vram_width) 694 - *vram_width = mem_channel_number * (1 << mem_channel_width); 695 - mem_vendor = (vram_module->v10.vender_rev_id) & 0xF; 696 - if (vram_vendor) 697 - *vram_vendor = mem_vendor; 698 - break; 699 - /* v25 */ 700 - case 5: 701 - if (module_id > vram_info->v25.vram_module_num) 702 - module_id = 0; 703 - vram_module = (union vram_module *)vram_info->v25.vram_module; 704 - while (i < module_id) { 705 - vram_module = (union vram_module *) 706 - ((u8 *)vram_module + vram_module->v11.vram_module_size); 707 - i++; 708 - } 709 - mem_type = vram_module->v11.memory_type; 710 - if (vram_type) 711 - *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 712 - mem_channel_number = vram_module->v11.channel_num; 713 - mem_channel_width = vram_module->v11.channel_width; 714 - if (vram_width) 715 - *vram_width = mem_channel_number * (1 << mem_channel_width); 716 - mem_vendor = (vram_module->v11.vender_rev_id) & 0xF; 717 - if (vram_vendor) 718 - *vram_vendor = mem_vendor; 719 - break; 720 - /* v26 */ 721 - case 6: 722 - if (module_id > vram_info->v26.vram_module_num) 723 - module_id = 0; 724 - vram_module = (union vram_module *)vram_info->v26.vram_module; 725 - while (i < module_id) { 726 - vram_module = (union vram_module *) 727 - ((u8 *)vram_module + vram_module->v9.vram_module_size); 728 - i++; 729 - } 730 - mem_type = vram_module->v9.memory_type; 731 - if (vram_type) 732 - *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 733 - mem_channel_number = vram_module->v9.channel_num; 734 - mem_channel_width = vram_module->v9.channel_width; 735 - if (vram_width) 736 - *vram_width = mem_channel_number * (1 << mem_channel_width); 737 - mem_vendor = (vram_module->v9.vender_rev_id) & 0xF; 738 - if (vram_vendor) 739 - *vram_vendor = mem_vendor; 740 - break; 741 - default: 742 - return -EINVAL; 743 - } 744 - } else { 745 - /* invalid frev */ 746 - return -EINVAL; 747 - } 748 - } 513 + /* invalid frev */ 514 + return -EINVAL; 749 515 } 516 + 517 + } else { 518 + return -EINVAL; 750 519 } 751 520 752 521 return 0;
+4
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
··· 30 30 bool amdgpu_atomfirmware_gpu_virtualization_supported(struct amdgpu_device *adev); 31 31 void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev); 32 32 int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev); 33 + int amdgpu_atomfirmware_get_integrated_system_info(struct amdgpu_device *adev, 34 + int *vram_width, int *vram_type, int *vram_vendor); 35 + int amdgpu_atomfirmware_get_umc_info(struct amdgpu_device *adev, 36 + int *vram_width, int *vram_type, int *vram_vendor); 33 37 int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev, 34 38 int *vram_width, int *vram_type, int *vram_vendor); 35 39 int amdgpu_atomfirmware_get_uma_carveout_info(struct amdgpu_device *adev,
+2 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 908 908 goto out_free_user_pages; 909 909 910 910 amdgpu_bo_list_for_each_entry(e, p->bo_list) { 911 - /* One fence for TTM and one for each CS job */ 912 911 r = drm_exec_prepare_obj(&p->exec, &e->bo->tbo.base, 913 - 1 + p->gang_size); 912 + TTM_NUM_MOVE_FENCES + p->gang_size); 914 913 drm_exec_retry_on_contention(&p->exec); 915 914 if (unlikely(r)) 916 915 goto out_free_user_pages; ··· 919 920 920 921 if (p->uf_bo) { 921 922 r = drm_exec_prepare_obj(&p->exec, &p->uf_bo->tbo.base, 922 - 1 + p->gang_size); 923 + TTM_NUM_MOVE_FENCES + p->gang_size); 923 924 drm_exec_retry_on_contention(&p->exec); 924 925 if (unlikely(r)) 925 926 goto out_free_user_pages;
+19 -6
drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
··· 35 35 void amdgpu_coredump_init(struct amdgpu_device *adev) 36 36 { 37 37 } 38 + void amdgpu_coredump_fini(struct amdgpu_device *adev) 39 + { 40 + } 38 41 #else 39 42 40 43 #define AMDGPU_CORE_DUMP_SIZE_MAX (256 * 1024 * 1024)
··· 195 192 drm_printf(p, "VPE feature version: %u, fw version: 0x%08x\n", 196 193 adev->vpe.feature_version, adev->vpe.fw_version); 197 194 198 - drm_printf(p, "\nVBIOS Information\n"); 199 - drm_printf(p, "vbios name : %s\n", ctx->name); 200 - drm_printf(p, "vbios pn : %s\n", ctx->vbios_pn); 201 - drm_printf(p, "vbios version : %d\n", ctx->version); 202 - drm_printf(p, "vbios ver_str : %s\n", ctx->vbios_ver_str); 203 - drm_printf(p, "vbios date : %s\n", ctx->date); 195 + if (adev->bios) { 196 + drm_printf(p, "\nVBIOS Information\n"); 197 + drm_printf(p, "vbios name : %s\n", ctx->name); 198 + drm_printf(p, "vbios pn : %s\n", ctx->vbios_pn); 199 + drm_printf(p, "vbios version : %d\n", ctx->version); 200 + drm_printf(p, "vbios ver_str : %s\n", ctx->vbios_ver_str); 201 + drm_printf(p, "vbios date : %s\n", ctx->date); 202 + } else { 203 + drm_printf(p, "\nVBIOS Information: NA\n"); 204 + } 204 205 } 205 206 206 207 static ssize_t
··· 442 435 void amdgpu_coredump_init(struct amdgpu_device *adev) 443 436 { 444 437 INIT_WORK(&adev->coredump_work, amdgpu_devcoredump_deferred_work); 438 + } 439 + 440 + void amdgpu_coredump_fini(struct amdgpu_device *adev) 441 + { 442 + /* Finish deferred coredump formatting before HW/IP teardown. */ 443 + flush_work(&adev->coredump_work); 445 444 } 446 445 #endif
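The new amdgpu_coredump_fini() hook follows the standard deferred-work teardown rule: any work item armed with INIT_WORK()/schedule_work() must be flushed before the data it touches is torn down. A minimal self-contained sketch of the same lifecycle, using a hypothetical dump_ctx rather than the real amdgpu_device:

	#include <linux/workqueue.h>

	struct dump_ctx {
		struct work_struct work;
		/* state the deferred handler reads; must outlive the handler */
	};

	static void dump_work_handler(struct work_struct *work)
	{
		struct dump_ctx *ctx = container_of(work, struct dump_ctx, work);

		/* format the dump from ctx here */
		(void)ctx;
	}

	static void dump_ctx_init(struct dump_ctx *ctx)
	{
		INIT_WORK(&ctx->work, dump_work_handler);
	}

	static void dump_ctx_fini(struct dump_ctx *ctx)
	{
		/* Wait for any in-flight handler, as amdgpu_coredump_fini()
		 * does for coredump_work before IP teardown. */
		flush_work(&ctx->work);
	}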
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h
··· 50 50 void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check, 51 51 bool vram_lost, struct amdgpu_job *job); 52 52 void amdgpu_coredump_init(struct amdgpu_device *adev); 53 + void amdgpu_coredump_fini(struct amdgpu_device *adev); 53 54 #endif
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 4225 4225 if (pci_dev_is_disconnected(adev->pdev)) 4226 4226 amdgpu_amdkfd_device_fini_sw(adev); 4227 4227 4228 + amdgpu_coredump_fini(adev); 4228 4229 amdgpu_device_ip_fini_early(adev); 4229 4230 4230 4231 amdgpu_irq_fini_hw(adev);
+6 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 2683 2683 if (r) 2684 2684 return r; 2685 2685 2686 - if (amdgpu_acpi_should_gpu_reset(adev)) 2687 - return amdgpu_asic_reset(adev); 2686 + if (amdgpu_acpi_should_gpu_reset(adev)) { 2687 + amdgpu_device_lock_reset_domain(adev->reset_domain); 2688 + r = amdgpu_asic_reset(adev); 2689 + amdgpu_device_unlock_reset_domain(adev->reset_domain); 2690 + return r; 2691 + } 2688 2692 return 0; 2689 2693 } 2690 2694
+44
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
··· 404 404 } 405 405 406 406 /** 407 + * amdgpu_gart_map_gfx9_mqd - map mqd and ctrl_stack dma_addresses into GART entries 408 + * 409 + * @adev: amdgpu_device pointer 410 + * @offset: offset into the GPU's gart aperture 411 + * @pages: number of pages to bind 412 + * @dma_addr: DMA addresses of pages 413 + * @flags: page table entry flags 414 + * 415 + * Map the MQD and control stack addresses into GART entries with the correct 416 + * memory types on gfxv9. The MQD occupies the first 4KB and is followed by 417 + * the control stack. The MQD uses UC (uncached) memory, while the control stack 418 + * uses NC (non-coherent) memory. 419 + */ 420 + void amdgpu_gart_map_gfx9_mqd(struct amdgpu_device *adev, uint64_t offset, 421 + int pages, dma_addr_t *dma_addr, uint64_t flags) 422 + { 423 + uint64_t page_base; 424 + unsigned int i, j, t; 425 + int idx; 426 + uint64_t ctrl_flags = AMDGPU_PTE_MTYPE_VG10(flags, AMDGPU_MTYPE_NC); 427 + void *dst; 428 + 429 + if (!adev->gart.ptr) 430 + return; 431 + 432 + if (!drm_dev_enter(adev_to_drm(adev), &idx)) 433 + return; 434 + 435 + t = offset / AMDGPU_GPU_PAGE_SIZE; 436 + dst = adev->gart.ptr; 437 + for (i = 0; i < pages; i++) { 438 + page_base = dma_addr[i]; 439 + for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) { 440 + if ((i == 0) && (j == 0)) 441 + amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags); 442 + else 443 + amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, ctrl_flags); 444 + page_base += AMDGPU_GPU_PAGE_SIZE; 445 + } 446 + } 447 + drm_dev_exit(idx); 448 + } 449 + 450 + /** 407 451 * amdgpu_gart_bind - bind pages into the gart page table 408 452 * 409 453 * @adev: amdgpu_device pointer
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
··· 62 62 void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, 63 63 int pages, dma_addr_t *dma_addr, uint64_t flags, 64 64 void *dst); 65 + void amdgpu_gart_map_gfx9_mqd(struct amdgpu_device *adev, uint64_t offset, 66 + int pages, dma_addr_t *dma_addr, uint64_t flags); 65 67 void amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, 66 68 int pages, dma_addr_t *dma_addr, uint64_t flags); 67 69 void amdgpu_gart_map_vram_range(struct amdgpu_device *adev, uint64_t pa,
+29
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
··· 34 34 #include "amdgpu_ras.h" 35 35 #include "amdgpu_reset.h" 36 36 #include "amdgpu_xgmi.h" 37 + #include "amdgpu_atomfirmware.h" 37 38 38 39 #include <drm/drm_drv.h> 39 40 #include <drm/ttm/ttm_tt.h> ··· 1746 1745 "Mem ranges not matching with hardware config\n"); 1747 1746 } 1748 1747 1748 + return 0; 1749 + } 1750 + 1751 + int amdgpu_gmc_get_vram_info(struct amdgpu_device *adev, 1752 + int *vram_width, int *vram_type, int *vram_vendor) 1753 + { 1754 + int ret = 0; 1755 + 1756 + if (adev->flags & AMD_IS_APU) 1757 + return amdgpu_atomfirmware_get_integrated_system_info(adev, 1758 + vram_width, vram_type, vram_vendor); 1759 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1760 + case IP_VERSION(12, 0, 0): 1761 + case IP_VERSION(12, 0, 1): 1762 + return amdgpu_atomfirmware_get_umc_info(adev, 1763 + vram_width, vram_type, vram_vendor); 1764 + case IP_VERSION(9, 5, 0): 1765 + case IP_VERSION(9, 4, 4): 1766 + case IP_VERSION(9, 4, 3): 1767 + ret = amdgpu_atomfirmware_get_umc_info(adev, 1768 + vram_width, vram_type, vram_vendor); 1769 + if (vram_width && !ret) 1770 + *vram_width *= hweight32(adev->aid_mask); 1771 + return ret; 1772 + default: 1773 + return amdgpu_atomfirmware_get_vram_info(adev, 1774 + vram_width, vram_type, vram_vendor); 1775 + } 1749 1776 return 0; 1750 1777 }
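One subtlety in the new helper: for the multi-AID GC 9.4.3/9.4.4/9.5.0 parts, the ATOM UMC table describes a single AID, so the result is scaled by the number of AIDs in the mask. Worked through with made-up numbers (not taken from any real board):

	u32 aid_mask = 0xF;	/* hypothetical: 4 active AIDs */
	int vram_width = 8192;	/* per-AID width from amdgpu_atomfirmware_get_umc_info() */

	/* hweight32() counts set bits, so the device-wide width
	 * becomes 8192 * 4 = 32768 bits. */
	vram_width *= hweight32(aid_mask);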
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
··· 482 482 int amdgpu_gmc_init_mem_ranges(struct amdgpu_device *adev); 483 483 void amdgpu_gmc_init_sw_mem_ranges(struct amdgpu_device *adev, 484 484 struct amdgpu_mem_partition_info *mem_ranges); 485 + int amdgpu_gmc_get_vram_info(struct amdgpu_device *adev, 486 + int *vram_width, int *vram_type, int *vram_vendor); 485 487 #endif
+4 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
··· 68 68 return -EINVAL; 69 69 70 70 spin_lock(&amdgpu_pasid_idr_lock); 71 + /* TODO: Need to replace the idr with an xarray, and then 72 + * handle the internal locking with atomic-safe paths. 73 + */ 71 74 pasid = idr_alloc_cyclic(&amdgpu_pasid_idr, NULL, 1, 72 - 1U << bits, GFP_KERNEL); 75 + 1U << bits, GFP_ATOMIC); 73 76 spin_unlock(&amdgpu_pasid_idr_lock); 74 77 75 78 if (pasid >= 0)
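Switching to GFP_ATOMIC fixes the immediate bug (a sleepable GFP_KERNEL allocation made under amdgpu_pasid_idr_lock); the TODO's longer-term direction is an allocating xarray, which serializes internally and could take GFP_KERNEL again. A rough sketch of that shape, not the actual follow-up patch:

	#include <linux/xarray.h>

	static DEFINE_XARRAY_ALLOC1(amdgpu_pasid_xa);	/* IDs start at 1 */
	static u32 amdgpu_pasid_next;

	static int amdgpu_pasid_alloc_sketch(unsigned int bits)
	{
		u32 pasid;
		int r;

		/* xa_alloc_cyclic() takes the xarray's own lock, so no external
		 * spinlock spans the allocation; a non-NULL entry is required,
		 * hence the placeholder value. */
		r = xa_alloc_cyclic(&amdgpu_pasid_xa, &pasid, xa_mk_value(0),
				    XA_LIMIT(1, (1U << bits) - 1),
				    &amdgpu_pasid_next, GFP_KERNEL);
		return r < 0 ? r : pasid;
	}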
+5
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
··· 44 44 #include <drm/display/drm_dp_mst_helper.h> 45 45 #include "modules/inc/mod_freesync.h" 46 46 #include "amdgpu_dm_irq_params.h" 47 + #include "amdgpu_dm_ism.h" 47 48 48 49 struct amdgpu_bo; 49 50 struct amdgpu_device; ··· 487 486 int deferred_flip_completion; 488 487 /* parameters access from DM IRQ handler */ 489 488 struct dm_irq_params dm_irq_params; 489 + 490 + /* DM idle state manager */ 491 + struct amdgpu_dm_ism ism; 492 + 490 493 /* pll sharing */ 491 494 struct amdgpu_atom_ss ss; 492 495 bool ss_enabled;
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
··· 1325 1325 if (r) 1326 1326 goto out; 1327 1327 1328 - r = amdgpu_fill_buffer(&adev->mman.clear_entity, abo, 0, &bo->base._resv, 1328 + r = amdgpu_fill_buffer(amdgpu_ttm_next_clear_entity(adev), 1329 + abo, 0, &bo->base._resv, 1329 1330 &fence, AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE); 1330 1331 if (WARN_ON(r)) 1331 1332 goto out;
+8
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
··· 1558 1558 unsigned char buf[RAS_TABLE_HEADER_SIZE] = { 0 }; 1559 1559 struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr; 1560 1560 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 1561 + int dev_var = adev->pdev->device & 0xF; 1562 + uint32_t vram_type = adev->gmc.vram_type; 1561 1563 int res; 1562 1564 1563 1565 if (amdgpu_ras_smu_eeprom_supported(adev)) ··· 1596 1594 if (hdr->header != RAS_TABLE_HDR_VAL && 1597 1595 hdr->header != RAS_TABLE_HDR_BAD) { 1598 1596 dev_info(adev->dev, "Creating a new EEPROM table"); 1597 + return amdgpu_ras_eeprom_reset_table(control); 1598 + } 1599 + 1600 + if (!(adev->flags & AMD_IS_APU) && (dev_var == 0x5) && 1601 + (vram_type == AMDGPU_VRAM_TYPE_HBM3E) && 1602 + (hdr->version < RAS_TABLE_VER_V3)) { 1599 1603 return amdgpu_ras_eeprom_reset_table(control); 1600 1604 } 1601 1605
+85 -46
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 387 387 { 388 388 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); 389 389 struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); 390 + struct amdgpu_ttm_buffer_entity *entity; 390 391 struct amdgpu_copy_mem src, dst; 391 392 struct dma_fence *fence = NULL; 392 393 int r; 394 + u32 e; 393 395 394 396 src.bo = bo; 395 397 dst.bo = bo;
··· 400 398 src.offset = 0; 401 399 dst.offset = 0; 402 400 401 + e = atomic_inc_return(&adev->mman.next_move_entity) % 402 + adev->mman.num_move_entities; 403 + entity = &adev->mman.move_entities[e]; 404 + 403 405 r = amdgpu_ttm_copy_mem_to_mem(adev, 404 - &adev->mman.move_entity, 406 + entity, 405 407 &src, &dst, 406 408 new_mem->size, 407 409 amdgpu_bo_encrypted(abo),
··· 417 411 if (old_mem->mem_type == TTM_PL_VRAM && 418 412 (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) { 419 413 struct dma_fence *wipe_fence = NULL; 420 - 421 - r = amdgpu_fill_buffer(&adev->mman.move_entity, 422 - abo, 0, NULL, &wipe_fence, 414 + r = amdgpu_fill_buffer(entity, abo, 0, NULL, &wipe_fence, 423 415 AMDGPU_KERNEL_JOB_ID_MOVE_BLIT); 424 416 if (r) { 425 417 goto error;
··· 858 854 int num_xcc = max(1U, adev->gfx.num_xcc_per_xcp); 859 855 uint64_t page_idx, pages_per_xcc; 860 856 int i; 861 - uint64_t ctrl_flags = AMDGPU_PTE_MTYPE_VG10(flags, AMDGPU_MTYPE_NC); 862 857 863 858 pages_per_xcc = total_pages; 864 859 do_div(pages_per_xcc, num_xcc); 865 860 866 861 for (i = 0, page_idx = 0; i < num_xcc; i++, page_idx += pages_per_xcc) { 867 - /* MQD page: use default flags */ 868 - amdgpu_gart_bind(adev, 862 + amdgpu_gart_map_gfx9_mqd(adev, 869 863 gtt->offset + (page_idx << PAGE_SHIFT), 870 - 1, &gtt->ttm.dma_address[page_idx], flags); 871 - /* 872 - * Ctrl pages - modify the memory type to NC (ctrl_flags) from 873 - * the second page of the BO onward. 874 - */ 875 - amdgpu_gart_bind(adev, 876 - gtt->offset + ((page_idx + 1) << PAGE_SHIFT), 877 - pages_per_xcc - 1, 878 - &gtt->ttm.dma_address[page_idx + 1], 879 - ctrl_flags); 864 + pages_per_xcc, &gtt->ttm.dma_address[page_idx], 865 + flags); 880 866 } 881 867 }
··· 2339 2345 void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable) 2340 2346 { 2341 2347 struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); 2348 + u32 num_clear_entities, num_move_entities; 2342 2349 uint64_t size; 2343 - int r; 2350 + int r, i, j; 2344 2351 2345 2352 if (!adev->mman.initialized || amdgpu_in_reset(adev) || 2346 2353 adev->mman.buffer_funcs_enabled == enable || adev->gmc.is_app_apu)
··· 2356 2361 return; 2357 2362 } 2358 2363 2364 + num_clear_entities = 1; 2365 + num_move_entities = 1; 2359 2366 ring = adev->mman.buffer_funcs_ring; 2360 2367 sched = &ring->sched; 2361 2368 r = amdgpu_ttm_buffer_entity_init(&adev->mman.gtt_mgr,
··· 2370 2373 return; 2371 2374 } 2372 2375 2373 - r = amdgpu_ttm_buffer_entity_init(&adev->mman.gtt_mgr, 2374 - &adev->mman.clear_entity, 2375 - DRM_SCHED_PRIORITY_NORMAL, 2376 - &sched, 1, 1); 2377 - if (r < 0) { 2378 - dev_err(adev->dev, 2379 - "Failed setting up TTM BO clear entity (%d)\n", r); 2376 + adev->mman.clear_entities = kcalloc(num_clear_entities, 2377 + sizeof(struct amdgpu_ttm_buffer_entity), 2378 + GFP_KERNEL); 2379 + atomic_set(&adev->mman.next_clear_entity, 0); 2380 + if (!adev->mman.clear_entities) 2380 2381 goto error_free_default_entity; 2382 + 2383 + adev->mman.num_clear_entities = num_clear_entities; 2384 + 2385 + for (i = 0; i < num_clear_entities; i++) { 2386 + r = amdgpu_ttm_buffer_entity_init( 2387 + &adev->mman.gtt_mgr, &adev->mman.clear_entities[i], 2388 + DRM_SCHED_PRIORITY_NORMAL, &sched, 1, 1); 2389 + 2390 + if (r < 0) { 2391 + for (j = 0; j < i; j++) 2392 + amdgpu_ttm_buffer_entity_fini( 2393 + &adev->mman.gtt_mgr, &adev->mman.clear_entities[j]); 2394 + kfree(adev->mman.clear_entities); 2395 + adev->mman.num_clear_entities = 0; 2396 + adev->mman.clear_entities = NULL; 2397 + goto error_free_default_entity; 2398 + } 2381 2399 } 2382 2400 2383 - r = amdgpu_ttm_buffer_entity_init(&adev->mman.gtt_mgr, 2384 - &adev->mman.move_entity, 2385 - DRM_SCHED_PRIORITY_NORMAL, 2386 - &sched, 1, 2); 2387 - if (r < 0) { 2388 - dev_err(adev->dev, 2389 - "Failed setting up TTM BO move entity (%d)\n", r); 2390 - goto error_free_clear_entity; 2401 + adev->mman.num_move_entities = num_move_entities; 2402 + atomic_set(&adev->mman.next_move_entity, 0); 2403 + for (i = 0; i < num_move_entities; i++) { 2404 + r = amdgpu_ttm_buffer_entity_init( 2405 + &adev->mman.gtt_mgr, 2406 + &adev->mman.move_entities[i], 2407 + DRM_SCHED_PRIORITY_NORMAL, &sched, 1, 2); 2408 + 2409 + if (r < 0) { 2410 + for (j = 0; j < i; j++) 2411 + amdgpu_ttm_buffer_entity_fini( 2412 + &adev->mman.gtt_mgr, &adev->mman.move_entities[j]); 2413 + adev->mman.num_move_entities = 0; 2414 + goto error_free_clear_entities; 2415 + } 2391 2416 } 2392 2417 } else { 2393 2418 amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr, 2394 2419 &adev->mman.default_entity); 2395 - amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr, 2396 - &adev->mman.clear_entity); 2397 - amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr, 2398 - &adev->mman.move_entity); 2420 + for (i = 0; i < adev->mman.num_clear_entities; i++) 2421 + amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr, 2422 + &adev->mman.clear_entities[i]); 2423 + for (i = 0; i < adev->mman.num_move_entities; i++) 2424 + amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr, 2425 + &adev->mman.move_entities[i]); 2399 2426 /* Drop all the old fences since re-creating the scheduler entities 2400 2427 * will allocate new contexts. 2401 2428 */ 2402 2429 ttm_resource_manager_cleanup(man); 2430 + kfree(adev->mman.clear_entities); 2431 + adev->mman.clear_entities = NULL; 2432 + adev->mman.num_clear_entities = 0; 2433 + adev->mman.num_move_entities = 0; 2403 2434 } 2404 2435 2405 2436 /* this just adjusts TTM size idea, which sets lpfn to the correct value */
··· 2440 2415 2441 2416 return; 2442 2417 2443 - error_free_clear_entity: 2444 - amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr, 2445 - &adev->mman.clear_entity); 2418 + error_free_clear_entities: 2419 + for (i = 0; i < adev->mman.num_clear_entities; i++) 2420 + amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr, 2421 + &adev->mman.clear_entities[i]); 2422 + kfree(adev->mman.clear_entities); 2423 + adev->mman.clear_entities = NULL; 2424 + adev->mman.num_clear_entities = 0; 2446 2425 error_free_default_entity: 2447 2426 amdgpu_ttm_buffer_entity_fini(&adev->mman.gtt_mgr, 2448 2427 &adev->mman.default_entity);
··· 2596 2567 2597 2568 if (!fence) 2598 2569 return -EINVAL; 2599 - 2600 - entity = &adev->mman.clear_entity; 2570 + entity = &adev->mman.clear_entities[0]; 2601 2571 *fence = dma_fence_get_stub(); 2602 2572 2603 2573 amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
··· 2648 2620 struct amdgpu_res_cursor dst; 2649 2621 int r; 2650 2622 2651 - if (!adev->mman.buffer_funcs_enabled) { 2652 - dev_err(adev->dev, 2653 - "Trying to clear memory with ring turned off.\n"); 2623 + if (!entity) 2654 2624 return -EINVAL; 2655 - } 2656 2625 2657 2626 amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &dst);
··· 2683 2658 *f = dma_fence_get(fence); 2684 2659 dma_fence_put(fence); 2685 2660 return r; 2661 + } 2662 + 2663 + struct amdgpu_ttm_buffer_entity * 2664 + amdgpu_ttm_next_clear_entity(struct amdgpu_device *adev) 2665 + { 2666 + struct amdgpu_mman *mman = &adev->mman; 2667 + u32 i; 2668 + 2669 + if (mman->num_clear_entities == 0) 2670 + return NULL; 2671 + 2672 + i = atomic_inc_return(&mman->next_clear_entity) % 2673 + mman->num_clear_entities; 2674 + return &mman->clear_entities[i]; 2686 2675 } 2687 2676 2688 2677 /**
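Both the move path and amdgpu_ttm_next_clear_entity() pick an entity with the same lock-free round-robin idiom: post-increment an atomic counter and reduce it modulo the pool size, so concurrent submitters fan out across entities without sharing a lock. Stripped to its core with generic names (not the amdgpu structs):

	struct entity_pool {
		atomic_t next;
		u32 count;	/* pool size, > 0 once initialized */
	};

	/* Safe from any number of threads; returns an index in [0, count). */
	static u32 entity_pool_pick(struct entity_pool *pool)
	{
		/* atomic_inc_return() may eventually wrap negative; casting
		 * to u32 before the modulo keeps the index in range anyway. */
		return (u32)atomic_inc_return(&pool->next) % pool->count;
	}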
+7 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
··· 72 72 73 73 /* @default_entity: for workarounds, has no gart windows */ 74 74 struct amdgpu_ttm_buffer_entity default_entity; 75 - struct amdgpu_ttm_buffer_entity clear_entity; 76 - struct amdgpu_ttm_buffer_entity move_entity; 75 + struct amdgpu_ttm_buffer_entity *clear_entities; 76 + atomic_t next_clear_entity; 77 + u32 num_clear_entities; 78 + struct amdgpu_ttm_buffer_entity move_entities[TTM_NUM_MOVE_FENCES]; 79 + atomic_t next_move_entity; 80 + u32 num_move_entities; 77 81 78 82 struct amdgpu_vram_mgr vram_mgr; 79 83 struct amdgpu_gtt_mgr gtt_mgr; ··· 195 191 struct dma_resv *resv, 196 192 struct dma_fence **f, 197 193 u64 k_job_id); 194 + struct amdgpu_ttm_buffer_entity *amdgpu_ttm_next_clear_entity(struct amdgpu_device *adev); 198 195 199 196 int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo); 200 197 void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
+18 -9
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
··· 600 600 goto unpin_bo; 601 601 } 602 602 603 + /* Validate doorbell_offset is within the doorbell BO */ 604 + if ((u64)db_info->doorbell_offset * db_size + db_size > 605 + amdgpu_bo_size(db_obj->obj)) { 606 + r = -EINVAL; 607 + goto unpin_bo; 608 + } 609 + 603 610 index = amdgpu_doorbell_index_on_bar(uq_mgr->adev, db_obj->obj, 604 611 db_info->doorbell_offset, db_size); 605 612 drm_dbg_driver(adev_to_drm(uq_mgr->adev), ··· 1004 997 unsigned long queue_id; 1005 998 int ret = 0, r; 1006 999 1000 + mutex_lock(&uq_mgr->userq_mutex); 1007 1001 /* Resume all the queues for this process */ 1008 1002 xa_for_each(&uq_mgr->userq_xa, queue_id, queue) { 1009 1003 ··· 1020 1012 ret = r; 1021 1013 1022 1014 } 1015 + mutex_unlock(&uq_mgr->userq_mutex); 1023 1016 1024 1017 if (ret) 1025 1018 drm_file_err(uq_mgr->file, "Failed to map all the queues\n"); ··· 1224 1215 struct dma_fence *ev_fence; 1225 1216 int ret; 1226 1217 1227 - mutex_lock(&uq_mgr->userq_mutex); 1228 1218 ev_fence = amdgpu_evf_mgr_get_fence(&fpriv->evf_mgr); 1229 1219 if (!dma_fence_is_signaled(ev_fence)) 1230 - goto unlock; 1220 + goto put_fence; 1231 1221 1232 1222 ret = amdgpu_userq_vm_validate(uq_mgr); 1233 1223 if (ret) { 1234 1224 drm_file_err(uq_mgr->file, "Failed to validate BOs to restore\n"); 1235 - goto unlock; 1225 + goto put_fence; 1236 1226 } 1237 1227 1238 1228 ret = amdgpu_userq_restore_all(uq_mgr); 1239 1229 if (ret) 1240 1230 drm_file_err(uq_mgr->file, "Failed to restore all queues\n"); 1241 1231 1242 - unlock: 1243 - mutex_unlock(&uq_mgr->userq_mutex); 1232 + put_fence: 1244 1233 dma_fence_put(ev_fence); 1245 1234 } 1246 1235 ··· 1461 1454 1462 1455 if (!adev->userq_halt_for_enforce_isolation) 1463 1456 dev_warn(adev->dev, "userq scheduling already started!\n"); 1457 + 1464 1458 adev->userq_halt_for_enforce_isolation = false; 1459 + 1465 1460 xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) { 1466 1461 uqm = queue->userq_mgr; 1467 1462 mutex_lock(&uqm->userq_mutex); 1468 - if (((queue->queue_type == AMDGPU_HW_IP_GFX) || 1469 - (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) && 1470 - (queue->xcp_id == idx)) { 1463 + if (((queue->queue_type == AMDGPU_HW_IP_GFX) || 1464 + (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) && 1465 + (queue->xcp_id == idx)) { 1471 1466 r = amdgpu_userq_restore_helper(queue); 1472 1467 if (r) 1473 1468 ret = r; 1474 - } 1469 + } 1475 1470 mutex_unlock(&uqm->userq_mutex); 1476 1471 } 1477 1472
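The new doorbell check rejects any user-supplied index whose slot would end past the doorbell BO, and it performs the multiply in 64 bits so a large 32-bit offset cannot wrap. The same guard in isolation, as a hypothetical helper:

	/* true iff slot [offset * db_size, offset * db_size + db_size) fits */
	static bool doorbell_offset_ok(u32 offset, u64 db_size, u64 bo_size)
	{
		u64 end = (u64)offset * db_size + db_size;

		return end <= bo_size;
	}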
+2 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
··· 328 328 return r; 329 329 } 330 330 331 - r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1); 332 - if (r) { 333 - dev_err(adev->dev, "allocating fence slot failed (%d)\n", r); 331 + r = dma_resv_reserve_fences(rbo->tbo.base.resv, TTM_NUM_MOVE_FENCES); 332 + if (r) 334 333 goto error_unlock; 335 - } 336 334 337 335 if (plane->type != DRM_PLANE_TYPE_CURSOR) 338 336 domain = amdgpu_display_supported_domains(adev, rbo->flags);
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
··· 173 173 #define AMDGPU_VA_RESERVED_SEQ64_SIZE (2ULL << 20) 174 174 #define AMDGPU_VA_RESERVED_SEQ64_START(adev) (AMDGPU_VA_RESERVED_CSA_START(adev) \ 175 175 - AMDGPU_VA_RESERVED_SEQ64_SIZE) 176 - #define AMDGPU_VA_RESERVED_TRAP_SIZE (2ULL << 12) 176 + #define AMDGPU_VA_RESERVED_TRAP_SIZE (1ULL << 16) 177 177 #define AMDGPU_VA_RESERVED_TRAP_START(adev) (AMDGPU_VA_RESERVED_SEQ64_START(adev) \ 178 178 - AMDGPU_VA_RESERVED_TRAP_SIZE) 179 179 #define AMDGPU_VA_RESERVED_BOTTOM (1ULL << 16)
+14
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
··· 1722 1722 } 1723 1723 } 1724 1724 break; 1725 + case IP_VERSION(11, 5, 4): 1726 + adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex; 1727 + adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex); 1728 + if (adev->gfx.me_fw_version >= 4 && 1729 + adev->gfx.pfp_fw_version >= 7 && 1730 + adev->gfx.mec_fw_version >= 5) { 1731 + adev->gfx.enable_cleaner_shader = true; 1732 + r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size); 1733 + if (r) { 1734 + adev->gfx.enable_cleaner_shader = false; 1735 + dev_err(adev->dev, "Failed to initialize cleaner shader\n"); 1736 + } 1737 + } 1738 + break; 1725 1739 default: 1726 1740 adev->gfx.enable_cleaner_shader = false; 1727 1741 break;
+1 -1
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
··· 767 767 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_GDDR6; 768 768 adev->gmc.vram_width = 1 * 128; /* numchan * chansize */ 769 769 } else { 770 - r = amdgpu_atomfirmware_get_vram_info(adev, 770 + r = amdgpu_gmc_get_vram_info(adev, 771 771 &vram_width, &vram_type, &vram_vendor); 772 772 adev->gmc.vram_width = vram_width; 773 773
+1 -1
drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
··· 751 751 752 752 spin_lock_init(&adev->gmc.invalidate_lock); 753 753 754 - r = amdgpu_atomfirmware_get_vram_info(adev, 754 + r = amdgpu_gmc_get_vram_info(adev, 755 755 &vram_width, &vram_type, &vram_vendor); 756 756 adev->gmc.vram_width = vram_width; 757 757
+1 -1
drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
··· 825 825 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 1, 0)) { 826 826 gmc_v12_1_init_vram_info(adev); 827 827 } else { 828 - r = amdgpu_atomfirmware_get_vram_info(adev, 828 + r = amdgpu_gmc_get_vram_info(adev, 829 829 &vram_width, &vram_type, &vram_vendor); 830 830 adev->gmc.vram_width = vram_width; 831 831 adev->gmc.vram_type = vram_type;
+29 -23
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
··· 1823 1823 adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0); 1824 1824 } 1825 1825 1826 - static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev) 1826 + static void gmc_v9_0_init_vram_info(struct amdgpu_device *adev) 1827 1827 { 1828 1828 static const u32 regBIF_BIOS_SCRATCH_4 = 0x50; 1829 + int dev_var = adev->pdev->device & 0xF; 1829 1830 u32 vram_info; 1830 1831 1831 - adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM; 1832 - adev->gmc.vram_width = 128 * 64; 1832 + if (adev->gmc.is_app_apu) { 1833 + adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM; 1834 + adev->gmc.vram_width = 128 * 64; 1835 + } else if (adev->flags & AMD_IS_APU) { 1836 + adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4; 1837 + adev->gmc.vram_width = 64 * 64; 1838 + } else if (amdgpu_is_multi_aid(adev)) { 1839 + adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM; 1840 + adev->gmc.vram_width = 128 * 64; 1833 1841 1834 - if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) 1835 - adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E; 1842 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) 1843 + adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E; 1836 1844 1837 - if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) && 1838 - adev->rev_id == 0x3) 1839 - adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E; 1845 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) && 1846 + adev->rev_id == 0x3) 1847 + adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E; 1840 1848 1841 - if (!(adev->flags & AMD_IS_APU) && !amdgpu_sriov_vf(adev)) { 1842 - vram_info = RREG32(regBIF_BIOS_SCRATCH_4); 1843 - adev->gmc.vram_vendor = vram_info & 0xF; 1849 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) && 1850 + (dev_var == 0x5)) 1851 + adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E; 1852 + 1853 + if (!(adev->flags & AMD_IS_APU) && !amdgpu_sriov_vf(adev)) { 1854 + vram_info = RREG32(regBIF_BIOS_SCRATCH_4); 1855 + adev->gmc.vram_vendor = vram_info & 0xF; 1856 + } 1844 1857 } 1845 1858 } 1846 1859 ··· 1869 1856 1870 1857 spin_lock_init(&adev->gmc.invalidate_lock); 1871 1858 1872 - if (amdgpu_is_multi_aid(adev)) { 1873 - gmc_v9_4_3_init_vram_info(adev); 1874 - } else if (!adev->bios) { 1875 - if (adev->flags & AMD_IS_APU) { 1876 - adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4; 1877 - adev->gmc.vram_width = 64 * 64; 1878 - } else { 1879 - adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM; 1880 - adev->gmc.vram_width = 128 * 64; 1881 - } 1859 + if (!adev->bios) { 1860 + gmc_v9_0_init_vram_info(adev); 1882 1861 } else { 1883 - r = amdgpu_atomfirmware_get_vram_info(adev, 1884 - &vram_width, &vram_type, &vram_vendor); 1862 + r = amdgpu_gmc_get_vram_info(adev, 1863 + &vram_width, &vram_type, &vram_vendor); 1885 1864 if (amdgpu_sriov_vf(adev)) 1886 1865 /* For Vega10 SR-IOV, vram_width can't be read from ATOM as RAVEN, 1887 1866 * and DF related registers is not readable, seems hardcord is the ··· 1901 1896 adev->gmc.vram_type = vram_type; 1902 1897 adev->gmc.vram_vendor = vram_vendor; 1903 1898 } 1899 + 1904 1900 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1905 1901 case IP_VERSION(9, 1, 0): 1906 1902 case IP_VERSION(9, 2, 2):
+12 -4
drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
··· 324 324 325 325 r = amdgpu_userq_input_va_validate(adev, queue, compute_mqd->eop_va, 326 326 2048); 327 - if (r) 327 + if (r) { 328 + kfree(compute_mqd); 328 329 goto free_mqd; 330 + } 329 331 330 332 userq_props->eop_gpu_addr = compute_mqd->eop_va; 331 333 userq_props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL; ··· 367 365 368 366 r = amdgpu_userq_input_va_validate(adev, queue, mqd_gfx_v11->shadow_va, 369 367 shadow_info.shadow_size); 370 - if (r) 368 + if (r) { 369 + kfree(mqd_gfx_v11); 371 370 goto free_mqd; 371 + } 372 372 r = amdgpu_userq_input_va_validate(adev, queue, mqd_gfx_v11->csa_va, 373 373 shadow_info.csa_size); 374 - if (r) 374 + if (r) { 375 + kfree(mqd_gfx_v11); 375 376 goto free_mqd; 377 + } 376 378 377 379 kfree(mqd_gfx_v11); 378 380 } else if (queue->queue_type == AMDGPU_HW_IP_DMA) { ··· 396 390 } 397 391 r = amdgpu_userq_input_va_validate(adev, queue, mqd_sdma_v11->csa_va, 398 392 32); 399 - if (r) 393 + if (r) { 394 + kfree(mqd_sdma_v11); 400 395 goto free_mqd; 396 + } 401 397 402 398 userq_props->csa_addr = mqd_sdma_v11->csa_va; 403 399 kfree(mqd_sdma_v11);
+2 -1
drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
··· 170 170 int retry_loop; 171 171 172 172 /* For a reset done at the end of S3, only wait for TOS to be unloaded */ 173 - if (adev->in_s3 && !(adev->flags & AMD_IS_APU) && amdgpu_in_reset(adev)) 173 + if ((adev->in_s4 || adev->in_s3) && !(adev->flags & AMD_IS_APU) && 174 + amdgpu_in_reset(adev)) 174 175 return psp_v11_wait_for_tos_unload(psp); 175 176 176 177 for (retry_loop = 0; retry_loop < 20; retry_loop++) {
+5
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
··· 93 93 static int uvd_v4_2_early_init(struct amdgpu_ip_block *ip_block) 94 94 { 95 95 struct amdgpu_device *adev = ip_block->adev; 96 + 97 + /* UVD doesn't work without DPM, it needs DPM to ungate it. */ 98 + if (!amdgpu_dpm) 99 + return -ENOENT; 100 + 96 101 adev->uvd.num_uvd_inst = 1; 97 102 98 103 uvd_v4_2_set_ring_funcs(adev);
+16 -12
drivers/gpu/drm/amd/amdkfd/kfd_int_process_v12_1.c
··· 144 144 #define KFD_CTXID0_DOORBELL_ID(ctxid0) ((ctxid0) & \ 145 145 KFD_CTXID0_DOORBELL_ID_MASK) 146 146 147 - static void print_sq_intr_info_auto(uint32_t context_id0, uint32_t context_id1) 147 + static void print_sq_intr_info_auto(struct kfd_node *dev, uint32_t context_id0, uint32_t context_id1) 148 148 { 149 - pr_debug_ratelimited( 149 + dev_dbg_ratelimited( 150 + dev->adev->dev, 150 151 "sq_intr: auto, ttrace %d, wlt %d, ttrace_buf0_full %d, ttrace_buf1_full %d ttrace_utc_err %d\n", 151 152 REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, THREAD_TRACE), 152 153 REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, WLT),
··· 156 155 REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, THREAD_TRACE_UTC_ERROR)); 157 156 } 158 157 159 - static void print_sq_intr_info_inst(uint32_t context_id0, uint32_t context_id1) 158 + static void print_sq_intr_info_inst(struct kfd_node *dev, uint32_t context_id0, uint32_t context_id1) 160 159 { 161 - pr_debug_ratelimited( 160 + dev_dbg_ratelimited( 161 + dev->adev->dev, 162 162 "sq_intr: inst, data 0x%08x, sh %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n", 163 163 REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, DATA), 164 164 REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, SA_ID),
··· 169 167 REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, WGP_ID)); 170 168 } 171 169 172 - static void print_sq_intr_info_error(uint32_t context_id0, uint32_t context_id1) 170 + static void print_sq_intr_info_error(struct kfd_node *dev, uint32_t context_id0, uint32_t context_id1) 173 171 { 174 - pr_debug_ratelimited( 172 + dev_warn_ratelimited( 173 + dev->adev->dev, 175 174 "sq_intr: error, detail 0x%08x, type %d, sh %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n", 176 175 REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, DETAIL), 177 176 REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_ERROR_CTXID0, TYPE),
··· 249 246 vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry); 250 247 251 248 if (!kfd_irq_is_from_node(node, node_id, vmid)) { 252 - pr_debug("Interrupt not for Node, node_id: %d, vmid: %d\n", node_id, vmid); 249 + dev_dbg_ratelimited(node->adev->dev, 250 + "Interrupt not for Node, node_id: %d, vmid: %d\n", node_id, vmid); 253 251 return false; 254 252 } 255 253
··· 270 266 (context_id0 & AMDGPU_FENCE_MES_QUEUE_FLAG)) 271 267 return false; 272 268 273 - pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n", 269 + dev_dbg(node->adev->dev, "client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n", 274 270 client_id, source_id, vmid, pasid); 275 271 pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n", 276 272 data[0], data[1], data[2], data[3], 277 273 data[4], data[5], data[6], data[7]); 278 274
··· 365 361 SQ_INTERRUPT_WORD_WAVE_CTXID1, ENCODING); 366 362 switch (sq_int_enc) { 367 363 case SQ_INTERRUPT_WORD_ENCODING_AUTO: 368 - print_sq_intr_info_auto(context_id0, context_id1); 364 + print_sq_intr_info_auto(node, context_id0, context_id1); 369 365 break; 370 366 case SQ_INTERRUPT_WORD_ENCODING_INST: 371 - print_sq_intr_info_inst(context_id0, context_id1); 367 + print_sq_intr_info_inst(node, context_id0, context_id1); 372 368 sq_int_priv = REG_GET_FIELD(context_id0, 373 369 SQ_INTERRUPT_WORD_WAVE_CTXID0, PRIV); 374 370 if (sq_int_priv && (kfd_set_dbg_ev_from_interrupt(node, pasid, ··· 378 374 return; 379 375 break; 380 376 case SQ_INTERRUPT_WORD_ENCODING_ERROR: 381 - print_sq_intr_info_error(context_id0, context_id1); 377 + print_sq_intr_info_error(node, context_id0, context_id1); 382 378 sq_int_errtype = REG_GET_FIELD(context_id0, 383 379 SQ_INTERRUPT_WORD_ERROR_CTXID0, TYPE); 384 380 if (sq_int_errtype != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
··· 136 136 u64 size; 137 137 int r; 138 138 139 - entity = &adev->mman.move_entity; 139 + entity = &adev->mman.move_entities[0]; 140 140 141 141 mutex_lock(&entity->lock); 142 142
+15 -8
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
··· 42 42 struct queue_properties *q) 43 43 { 44 44 if (mm->dev->kfd->cwsr_enabled && 45 - q->type == KFD_QUEUE_TYPE_COMPUTE) 46 - return ALIGN(q->ctl_stack_size, PAGE_SIZE) + 47 - ALIGN(sizeof(struct v9_mqd), PAGE_SIZE); 45 + q->type == KFD_QUEUE_TYPE_COMPUTE) { 46 + 47 + /* On gfxv9, the MQD resides in the first 4K page, 48 + * followed by the control stack. Align both to 49 + * AMDGPU_GPU_PAGE_SIZE to maintain the required 4K boundary. 50 + */ 51 + 52 + return ALIGN(ALIGN(q->ctl_stack_size, AMDGPU_GPU_PAGE_SIZE) + 53 + ALIGN(sizeof(struct v9_mqd), AMDGPU_GPU_PAGE_SIZE), PAGE_SIZE); 54 + } 48 55 49 56 return mm->mqd_size; 50 57 } ··· 157 150 if (!mqd_mem_obj) 158 151 return NULL; 159 152 retval = amdgpu_amdkfd_alloc_kernel_mem(node->adev, 160 - (ALIGN(q->ctl_stack_size, PAGE_SIZE) + 161 - ALIGN(sizeof(struct v9_mqd), PAGE_SIZE)) * 153 + (ALIGN(ALIGN(q->ctl_stack_size, AMDGPU_GPU_PAGE_SIZE) + 154 + ALIGN(sizeof(struct v9_mqd), AMDGPU_GPU_PAGE_SIZE), PAGE_SIZE)) * 162 155 NUM_XCC(node->xcc_mask), 163 156 mqd_on_vram(node->adev) ? AMDGPU_GEM_DOMAIN_VRAM : 164 157 AMDGPU_GEM_DOMAIN_GTT, ··· 366 359 struct kfd_context_save_area_header header; 367 360 368 361 /* Control stack is located one page after MQD. */ 369 - void *mqd_ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE); 362 + void *mqd_ctl_stack = (void *)((uintptr_t)mqd + AMDGPU_GPU_PAGE_SIZE); 370 363 371 364 m = get_mqd(mqd); 372 365 ··· 406 399 { 407 400 struct v9_mqd *m; 408 401 /* Control stack is located one page after MQD. */ 409 - void *ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE); 402 + void *ctl_stack = (void *)((uintptr_t)mqd + AMDGPU_GPU_PAGE_SIZE); 410 403 411 404 m = get_mqd(mqd); 412 405 ··· 452 445 *gart_addr = addr; 453 446 454 447 /* Control stack is located one page after MQD. */ 455 - ctl_stack = (void *)((uintptr_t)*mqd + PAGE_SIZE); 448 + ctl_stack = (void *)((uintptr_t)*mqd + AMDGPU_GPU_PAGE_SIZE); 456 449 memcpy(ctl_stack, ctl_stack_src, ctl_stack_size); 457 450 458 451 m->cp_hqd_pq_doorbell_control =
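Given the layout described in the comment (the MQD alone in the first 4K GPU page, the control stack starting on the next one), the size helper rounds each piece up to AMDGPU_GPU_PAGE_SIZE before the final PAGE_SIZE alignment, so the control stack can never share the MQD's page. A worked example with made-up input sizes:

	/* Hypothetical inputs: sizeof(struct v9_mqd) == 688,
	 * ctl_stack_size == 5000, AMDGPU_GPU_PAGE_SIZE == 4096. */
	size_t mqd = ALIGN(688, 4096);		/* 4096: MQD keeps page 0 to itself */
	size_t ctl = ALIGN(5000, 4096);		/* 8192: stack spans pages 1-2 */
	size_t total = ALIGN(ctl + mqd, PAGE_SIZE);	/* 12288 on 4K-page kernels */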
+2 -2
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
··· 102 102 * The first chunk is the TBA used for the CWSR ISA code. The second 103 103 * chunk is used as TMA for user-mode trap handler setup in daisy-chain mode. 104 104 */ 105 - #define KFD_CWSR_TBA_TMA_SIZE (PAGE_SIZE * 2) 106 - #define KFD_CWSR_TMA_OFFSET (PAGE_SIZE + 2048) 105 + #define KFD_CWSR_TBA_TMA_SIZE (AMDGPU_GPU_PAGE_SIZE * 2) 106 + #define KFD_CWSR_TMA_OFFSET (AMDGPU_GPU_PAGE_SIZE + 2048) 107 107 108 108 #define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \ 109 109 (KFD_MAX_NUM_OF_PROCESSES * \
+2 -1
drivers/gpu/drm/amd/amdkfd/kfd_process.c
··· 679 679 680 680 void kfd_procfs_del_queue(struct queue *q) 681 681 { 682 - if (!q) 682 + if (!q || !q->process->kobj) 683 683 return; 684 684 685 685 kobject_del(&q->kobj); ··· 858 858 if (ret) { 859 859 pr_warn("Creating procfs pid directory failed"); 860 860 kobject_put(process->kobj); 861 + process->kobj = NULL; 861 862 return ret; 862 863 } 863 864
+6 -5
drivers/gpu/drm/amd/amdkfd/kfd_queue.c
··· 249 249 topo_dev->node_props.gfx_target_version < 90000) 250 250 /* metadata_queue_size not supported on GFX7/GFX8 */ 251 251 expected_queue_size = 252 - properties->queue_size / 2; 252 + PAGE_ALIGN(properties->queue_size / 2); 253 253 else 254 254 expected_queue_size = 255 - properties->queue_size + properties->metadata_queue_size; 255 + PAGE_ALIGN(properties->queue_size + properties->metadata_queue_size); 256 256 257 257 vm = drm_priv_to_vm(pdd->drm_priv); 258 258 err = amdgpu_bo_reserve(vm->root.bo, false); ··· 492 492 cu_num = props->simd_count / props->simd_per_cu / NUM_XCC(dev->gpu->xcc_mask); 493 493 wave_num = get_num_waves(props, gfxv, cu_num); 494 494 495 - wg_data_size = ALIGN(cu_num * WG_CONTEXT_DATA_SIZE_PER_CU(gfxv, props), PAGE_SIZE); 495 + wg_data_size = ALIGN(cu_num * WG_CONTEXT_DATA_SIZE_PER_CU(gfxv, props), 496 + AMDGPU_GPU_PAGE_SIZE); 496 497 ctl_stack_size = wave_num * CNTL_STACK_BYTES_PER_WAVE(gfxv) + 8; 497 498 ctl_stack_size = ALIGN(SIZEOF_HSA_USER_CONTEXT_SAVE_AREA_HEADER + ctl_stack_size, 498 - PAGE_SIZE); 499 + AMDGPU_GPU_PAGE_SIZE); 499 500 500 501 if ((gfxv / 10000 * 10000) == 100000) { 501 502 /* HW design limits control stack size to 0x7000. ··· 508 507 509 508 props->ctl_stack_size = ctl_stack_size; 510 509 props->debug_memory_size = ALIGN(wave_num * DEBUGGER_BYTES_PER_WAVE, DEBUGGER_BYTES_ALIGN); 511 - props->cwsr_size = ctl_stack_size + wg_data_size; 510 + props->cwsr_size = ALIGN(ctl_stack_size + wg_data_size, PAGE_SIZE); 512 511 513 512 if (gfxv == 80002) /* GFX_VERSION_TONGA */ 514 513 props->eop_buffer_size = 0x8000;
+1 -2
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
··· 628 628 } 629 629 } 630 630 631 - r = dma_resv_reserve_fences(bo->tbo.base.resv, 1); 631 + r = dma_resv_reserve_fences(bo->tbo.base.resv, TTM_NUM_MOVE_FENCES); 632 632 if (r) { 633 - pr_debug("failed %d to reserve bo\n", r); 634 633 amdgpu_bo_unreserve(bo); 635 634 goto reserve_bo_failed; 636 635 }
+2 -1
drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
··· 40 40 amdgpu_dm_replay.o \ 41 41 amdgpu_dm_quirks.o \ 42 42 amdgpu_dm_wb.o \ 43 - amdgpu_dm_colorop.o 43 + amdgpu_dm_colorop.o \ 44 + amdgpu_dm_ism.o 44 45 45 46 ifdef CONFIG_DRM_AMD_DC_FP 46 47 AMDGPUDM += dc_fpu.o
+13 -28
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 3283 3283 3284 3284 mutex_lock(&dm->dc_lock); 3285 3285 3286 + amdgpu_dm_ism_disable(dm); 3286 3287 dc_allow_idle_optimizations(adev->dm.dc, false); 3287 3288 3288 3289 dm->cached_dc_state = dc_state_create_copy(dm->dc->current_state); ··· 3316 3315 s3_handle_mst(adev_to_drm(adev), true); 3317 3316 3318 3317 amdgpu_dm_irq_suspend(adev); 3318 + 3319 + scoped_guard(mutex, &dm->dc_lock) 3320 + amdgpu_dm_ism_disable(dm); 3319 3321 3320 3322 hpd_rx_irq_work_suspend(dm); 3321 3323 ··· 3610 3606 3611 3607 dc_resume(dm->dc); 3612 3608 3609 + amdgpu_dm_ism_enable(dm); 3613 3610 amdgpu_dm_irq_resume_early(adev); 3614 3611 3615 3612 for (i = 0; i < dc_state->stream_count; i++) { ··· 3670 3665 3671 3666 /* program HPD filter */ 3672 3667 dc_resume(dm->dc); 3668 + 3669 + scoped_guard(mutex, &dm->dc_lock) 3670 + amdgpu_dm_ism_enable(dm); 3673 3671 3674 3672 /* 3675 3673 * early enable HPD Rx IRQ, should be done before set mode as short ··· 5589 5581 case IP_VERSION(3, 5, 0): 5590 5582 case IP_VERSION(3, 5, 1): 5591 5583 case IP_VERSION(3, 6, 0): 5584 + case IP_VERSION(4, 2, 0): 5592 5585 replay_feature_enabled = true; 5593 5586 break; 5594 5587 ··· 9342 9333 if (acrtc_state) { 9343 9334 timing = &acrtc_state->stream->timing; 9344 9335 9345 - /* 9346 - * Depending on when the HW latching event of double-buffered 9347 - * registers happen relative to the PSR SDP deadline, and how 9348 - * bad the Panel clock has drifted since the last ALPM off 9349 - * event, there can be up to 3 frames of delay between sending 9350 - * the PSR exit cmd to DMUB fw, and when the panel starts 9351 - * displaying live frames. 9352 - * 9353 - * We can set: 9354 - * 9355 - * 20/100 * offdelay_ms = 3_frames_ms 9356 - * => offdelay_ms = 5 * 3_frames_ms 9357 - * 9358 - * This ensures that `3_frames_ms` will only be experienced as a 9359 - * 20% delay on top how long the display has been static, and 9360 - * thus make the delay less perceivable. 9361 - */ 9362 - if (acrtc_state->stream->link->psr_settings.psr_version < 9363 - DC_PSR_VERSION_UNSUPPORTED) { 9364 - offdelay = DIV64_U64_ROUND_UP((u64)5 * 3 * 10 * 9365 - timing->v_total * 9366 - timing->h_total, 9367 - timing->pix_clk_100hz); 9368 - config.offdelay_ms = offdelay ?: 30; 9369 - } else if (amdgpu_ip_version(adev, DCE_HWIP, 0) < 9336 + if (amdgpu_ip_version(adev, DCE_HWIP, 0) < 9370 9337 IP_VERSION(3, 5, 0) || 9371 9338 !(adev->flags & AMD_IS_APU)) { 9372 9339 /* ··· 9874 9889 } 9875 9890 9876 9891 /* Decrement skip count when SR is enabled and we're doing fast updates. */ 9877 - if (acrtc_state->update_type <= UPDATE_TYPE_FAST && 9892 + if (acrtc_state->update_type == UPDATE_TYPE_FAST && 9878 9893 (psr->psr_feature_enabled || pr->config.replay_supported)) { 9879 9894 if (aconn->sr_skip_count > 0) 9880 9895 aconn->sr_skip_count--; ··· 10084 10099 * fast updates. 10085 10100 */ 10086 10101 if (crtc->state->async_flip && 10087 - (acrtc_state->update_type > UPDATE_TYPE_FAST || 10102 + (acrtc_state->update_type != UPDATE_TYPE_FAST || 10088 10103 get_mem_type(old_plane_state->fb) != get_mem_type(fb))) 10089 10104 drm_warn_once(state->dev, 10090 10105 "[PLANE:%d:%s] async flip with non-fast update\n", ··· 10092 10107 10093 10108 bundle->flip_addrs[planes_count].flip_immediate = 10094 10109 crtc->state->async_flip && 10095 - acrtc_state->update_type <= UPDATE_TYPE_FAST && 10110 + acrtc_state->update_type == UPDATE_TYPE_FAST && 10096 10111 get_mem_type(old_plane_state->fb) == get_mem_type(fb); 10097 10112 10098 10113 timestamp_ns = ktime_get_ns();
+54 -38
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
··· 124 124 * - Enable condition same as above 125 125 * - Disable when vblank counter is enabled 126 126 */ 127 - static void amdgpu_dm_crtc_set_panel_sr_feature( 128 - struct vblank_control_work *vblank_work, 127 + void amdgpu_dm_crtc_set_panel_sr_feature( 128 + struct amdgpu_display_manager *dm, 129 + struct amdgpu_crtc *acrtc, 130 + struct dc_stream_state *stream, 129 131 bool vblank_enabled, bool allow_sr_entry) 130 132 { 131 - struct dc_link *link = vblank_work->stream->link; 133 + struct dc_link *link = stream->link; 132 134 bool is_sr_active = (link->replay_settings.replay_allow_active || 133 135 link->psr_settings.psr_allow_active); 134 136 bool is_crc_window_active = false; 135 - bool vrr_active = amdgpu_dm_crtc_vrr_active_irq(vblank_work->acrtc); 137 + bool vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc); 136 138 137 139 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY 138 140 is_crc_window_active = 139 - amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base); 141 + amdgpu_dm_crc_window_is_activated(&acrtc->base); 140 142 #endif 141 143 142 144 if (link->replay_settings.replay_feature_enabled && !vrr_active && 143 145 allow_sr_entry && !is_sr_active && !is_crc_window_active) { 144 - amdgpu_dm_replay_enable(vblank_work->stream, true); 146 + amdgpu_dm_replay_enable(stream, true); 145 147 } else if (vblank_enabled) { 146 148 if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 && is_sr_active) 147 - amdgpu_dm_psr_disable(vblank_work->stream, false); 149 + amdgpu_dm_psr_disable(stream, false); 148 150 } else if (link->psr_settings.psr_feature_enabled && !vrr_active && 149 151 allow_sr_entry && !is_sr_active && !is_crc_window_active) { 150 152 151 153 struct amdgpu_dm_connector *aconn = 152 - (struct amdgpu_dm_connector *) vblank_work->stream->dm_stream_context; 154 + (struct amdgpu_dm_connector *) stream->dm_stream_context; 153 155 154 156 if (!aconn->disallow_edp_enter_psr) { 155 - struct amdgpu_display_manager *dm = vblank_work->dm; 156 - 157 - amdgpu_dm_psr_enable(vblank_work->stream); 157 + amdgpu_dm_psr_enable(stream); 158 158 if (dm->idle_workqueue && 159 159 (dm->dc->config.disable_ips == DMUB_IPS_ENABLE) && 160 160 dm->dc->idle_optimizations_allowed && ··· 251 251 252 252 mutex_lock(&dm->dc_lock); 253 253 254 - if (vblank_work->enable) 254 + if (vblank_work->enable) { 255 255 dm->active_vblank_irq_count++; 256 - else if (dm->active_vblank_irq_count) 257 - dm->active_vblank_irq_count--; 258 - 259 - if (dm->active_vblank_irq_count > 0) 260 - dc_allow_idle_optimizations(dm->dc, false); 261 - 262 - /* 263 - * Control PSR based on vblank requirements from OS 264 - * 265 - * If panel supports PSR SU, there's no need to disable PSR when OS is 266 - * submitting fast atomic commits (we infer this by whether the OS 267 - * requests vblank events). Fast atomic commits will simply trigger a 268 - * full-frame-update (FFU); a specific case of selective-update (SU) 269 - * where the SU region is the full hactive*vactive region. See 270 - * fill_dc_dirty_rects(). 
271 - */ 272 - if (vblank_work->stream && vblank_work->stream->link && vblank_work->acrtc) { 273 - amdgpu_dm_crtc_set_panel_sr_feature( 274 - vblank_work, vblank_work->enable, 275 - vblank_work->acrtc->dm_irq_params.allow_sr_entry); 276 - } 277 - 278 - if (dm->active_vblank_irq_count == 0) { 279 - dc_post_update_surfaces_to_stream(dm->dc); 280 - dc_allow_idle_optimizations(dm->dc, true); 256 + amdgpu_dm_ism_commit_event(&vblank_work->acrtc->ism, 257 + DM_ISM_EVENT_EXIT_IDLE_REQUESTED); 258 + } else { 259 + if (dm->active_vblank_irq_count > 0) 260 + dm->active_vblank_irq_count--; 261 + amdgpu_dm_ism_commit_event(&vblank_work->acrtc->ism, 262 + DM_ISM_EVENT_ENTER_IDLE_REQUESTED); 281 263 } 282 264 283 265 mutex_unlock(&dm->dc_lock); ··· 458 476 459 477 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc) 460 478 { 479 + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 480 + 481 + amdgpu_dm_ism_fini(&acrtc->ism); 461 482 drm_crtc_cleanup(crtc); 462 483 kfree(crtc); 463 484 } ··· 670 685 * pitch, the DCC state, rotation, etc. 671 686 */ 672 687 if (crtc_state->async_flip && 673 - dm_crtc_state->update_type > UPDATE_TYPE_FAST) { 688 + dm_crtc_state->update_type != UPDATE_TYPE_FAST) { 674 689 drm_dbg_atomic(crtc->dev, 675 690 "[CRTC:%d:%s] async flips are only supported for fast updates\n", 676 691 crtc->base.id, crtc->name); ··· 704 719 .get_scanout_position = amdgpu_crtc_get_scanout_position, 705 720 }; 706 721 722 + /* 723 + * This hysteresis filter as configured will: 724 + * 725 + * * Search through the latest 8[filter_history_size] entries in history, 726 + * skipping entries that are older than [filter_old_history_threshold] frames 727 + * (0 means ignore age) 728 + * * Search for short-idle-periods that lasted less than 729 + * 4[filter_num_frames] frame-times 730 + * * If there is at least 1[filter_entry_count] short-idle-period, then a delay 731 + * of 4[activation_num_delay_frames] will be applied before allowing idle 732 + * optimizations again. 733 + * * An additional delay of 11[sso_num_frames] is applied before enabling 734 + * panel-specific optimizations. 735 + * 736 + * The values were determined empirically on another OS, optimizing for Z8 737 + * residency on APUs when running a productivity + web browsing test. 738 + * 739 + * TODO: Run similar tests to determine if these values are also optimal for 740 + * Linux, and if each APU generation benefits differently. 741 + */ 742 + static struct amdgpu_dm_ism_config default_ism_config = { 743 + .filter_num_frames = 4, 744 + .filter_history_size = 8, 745 + .filter_entry_count = 1, 746 + .activation_num_delay_frames = 4, 747 + .filter_old_history_threshold = 0, 748 + .sso_num_frames = 11, 749 + }; 750 + 707 751 int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, 708 752 struct drm_plane *plane, 709 753 uint32_t crtc_index) ··· 762 748 763 749 if (res) 764 750 goto fail; 751 + 752 + amdgpu_dm_ism_init(&acrtc->ism, &default_ism_config); 765 753 766 754 drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs); 767 755
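To put the default_ism_config values above on a time axis: at 60 Hz one frame-time is roughly 16.67 ms, so the filter treats any idle period under 4 frames (~66.7 ms) as short, a single short period within the last 8 records arms a 4-frame (~66.7 ms) delay before idle is allowed again, and static screen optimizations wait a further 11 frames (~183 ms). A small userspace sketch of the arithmetic, using assumed 1080p@60 timing values (not taken from the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed timing: v_total = 1125, h_total = 2200, 148.5 MHz pixel clock */
	uint64_t v_total = 1125, h_total = 2200, pix_clk_100hz = 1485000;
	/* Same formula dm_ism_get_sso_delay() uses for the frame period */
	uint64_t one_frame_ns = v_total * h_total * 10000000ull / pix_clk_100hz;

	printf("one frame:        %llu ns\n", (unsigned long long)one_frame_ns);
	printf("short-idle bound: %llu ns\n", (unsigned long long)(4 * one_frame_ns));	/* filter_num_frames */
	printf("idle-allow delay: %llu ns\n", (unsigned long long)(4 * one_frame_ns));	/* activation_num_delay_frames */
	printf("sso delay:        %llu ns\n", (unsigned long long)(11 * one_frame_ns));	/* sso_num_frames */
	return 0;
}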
+6
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h
··· 27 27 #ifndef __AMDGPU_DM_CRTC_H__ 28 28 #define __AMDGPU_DM_CRTC_H__ 29 29 30 + void amdgpu_dm_crtc_set_panel_sr_feature( 31 + struct amdgpu_display_manager *dm, 32 + struct amdgpu_crtc *acrtc, 33 + struct dc_stream_state *stream, 34 + bool vblank_enabled, bool allow_sr_entry); 35 + 30 36 void amdgpu_dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc); 31 37 32 38 bool amdgpu_dm_crtc_modeset_required(struct drm_crtc_state *crtc_state,
+598
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_ism.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright 2026 Advanced Micro Devices, Inc. 4 + * 5 + * Permission is hereby granted, free of charge, to any person obtaining a 6 + * copy of this software and associated documentation files (the "Software"), 7 + * to deal in the Software without restriction, including without limitation 8 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 + * and/or sell copies of the Software, and to permit persons to whom the 10 + * Software is furnished to do so, subject to the following conditions: 11 + * 12 + * The above copyright notice and this permission notice shall be included in 13 + * all copies or substantial portions of the Software. 14 + * 15 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 + * OTHER DEALINGS IN THE SOFTWARE. 22 + * 23 + * Authors: AMD 24 + * 25 + */ 26 + 27 + #include <linux/types.h> 28 + #include <drm/drm_vblank.h> 29 + 30 + #include "dc.h" 31 + #include "amdgpu.h" 32 + #include "amdgpu_dm_ism.h" 33 + #include "amdgpu_dm_crtc.h" 34 + #include "amdgpu_dm_trace.h" 35 + 36 + /** 37 + * dm_ism_next_state - Get next state based on current state and event 38 + * 39 + * This function defines the idle state management FSM. Invalid transitions 40 + * are ignored and will not progress the FSM. 41 + */ 42 + static bool dm_ism_next_state(enum amdgpu_dm_ism_state current_state, 43 + enum amdgpu_dm_ism_event event, 44 + enum amdgpu_dm_ism_state *next_state) 45 + { 46 + switch (STATE_EVENT(current_state, event)) { 47 + case STATE_EVENT(DM_ISM_STATE_FULL_POWER_RUNNING, 48 + DM_ISM_EVENT_ENTER_IDLE_REQUESTED): 49 + *next_state = DM_ISM_STATE_HYSTERESIS_WAITING; 50 + break; 51 + case STATE_EVENT(DM_ISM_STATE_FULL_POWER_RUNNING, 52 + DM_ISM_EVENT_BEGIN_CURSOR_UPDATE): 53 + *next_state = DM_ISM_STATE_FULL_POWER_BUSY; 54 + break; 55 + 56 + case STATE_EVENT(DM_ISM_STATE_FULL_POWER_BUSY, 57 + DM_ISM_EVENT_ENTER_IDLE_REQUESTED): 58 + *next_state = DM_ISM_STATE_HYSTERESIS_BUSY; 59 + break; 60 + case STATE_EVENT(DM_ISM_STATE_FULL_POWER_BUSY, 61 + DM_ISM_EVENT_END_CURSOR_UPDATE): 62 + *next_state = DM_ISM_STATE_FULL_POWER_RUNNING; 63 + break; 64 + 65 + case STATE_EVENT(DM_ISM_STATE_HYSTERESIS_WAITING, 66 + DM_ISM_EVENT_EXIT_IDLE_REQUESTED): 67 + *next_state = DM_ISM_STATE_TIMER_ABORTED; 68 + break; 69 + case STATE_EVENT(DM_ISM_STATE_HYSTERESIS_WAITING, 70 + DM_ISM_EVENT_BEGIN_CURSOR_UPDATE): 71 + *next_state = DM_ISM_STATE_HYSTERESIS_BUSY; 72 + break; 73 + case STATE_EVENT(DM_ISM_STATE_HYSTERESIS_WAITING, 74 + DM_ISM_EVENT_TIMER_ELAPSED): 75 + *next_state = DM_ISM_STATE_OPTIMIZED_IDLE; 76 + break; 77 + case STATE_EVENT(DM_ISM_STATE_HYSTERESIS_WAITING, 78 + DM_ISM_EVENT_IMMEDIATE): 79 + *next_state = DM_ISM_STATE_OPTIMIZED_IDLE; 80 + break; 81 + 82 + case STATE_EVENT(DM_ISM_STATE_HYSTERESIS_BUSY, 83 + DM_ISM_EVENT_EXIT_IDLE_REQUESTED): 84 + *next_state = DM_ISM_STATE_FULL_POWER_BUSY; 85 + break; 86 + case STATE_EVENT(DM_ISM_STATE_HYSTERESIS_BUSY, 87 + DM_ISM_EVENT_END_CURSOR_UPDATE): 88 + *next_state = DM_ISM_STATE_HYSTERESIS_WAITING; 89 + break; 90 + 91 + case STATE_EVENT(DM_ISM_STATE_OPTIMIZED_IDLE, 
92 + DM_ISM_EVENT_EXIT_IDLE_REQUESTED): 93 + *next_state = DM_ISM_STATE_FULL_POWER_RUNNING; 94 + break; 95 + case STATE_EVENT(DM_ISM_STATE_OPTIMIZED_IDLE, 96 + DM_ISM_EVENT_BEGIN_CURSOR_UPDATE): 97 + *next_state = DM_ISM_STATE_HYSTERESIS_BUSY; 98 + break; 99 + case STATE_EVENT(DM_ISM_STATE_OPTIMIZED_IDLE, 100 + DM_ISM_EVENT_SSO_TIMER_ELAPSED): 101 + case STATE_EVENT(DM_ISM_STATE_OPTIMIZED_IDLE, 102 + DM_ISM_EVENT_IMMEDIATE): 103 + *next_state = DM_ISM_STATE_OPTIMIZED_IDLE_SSO; 104 + break; 105 + 106 + case STATE_EVENT(DM_ISM_STATE_OPTIMIZED_IDLE_SSO, 107 + DM_ISM_EVENT_EXIT_IDLE_REQUESTED): 108 + *next_state = DM_ISM_STATE_FULL_POWER_RUNNING; 109 + break; 110 + case STATE_EVENT(DM_ISM_STATE_OPTIMIZED_IDLE_SSO, 111 + DM_ISM_EVENT_BEGIN_CURSOR_UPDATE): 112 + *next_state = DM_ISM_STATE_HYSTERESIS_BUSY; 113 + break; 114 + 115 + case STATE_EVENT(DM_ISM_STATE_TIMER_ABORTED, 116 + DM_ISM_EVENT_IMMEDIATE): 117 + *next_state = DM_ISM_STATE_FULL_POWER_RUNNING; 118 + break; 119 + 120 + default: 121 + return false; 122 + } 123 + return true; 124 + } 125 + 126 + static uint64_t dm_ism_get_sso_delay(const struct amdgpu_dm_ism *ism, 127 + const struct dc_stream_state *stream) 128 + { 129 + const struct amdgpu_dm_ism_config *config = &ism->config; 130 + uint32_t v_total, h_total; 131 + uint64_t one_frame_ns, sso_delay_ns; 132 + 133 + if (!stream) 134 + return 0; 135 + 136 + if (!config->sso_num_frames) 137 + return 0; 138 + 139 + v_total = stream->timing.v_total; 140 + h_total = stream->timing.h_total; 141 + 142 + one_frame_ns = div64_u64(v_total * h_total * 10000000ull, 143 + stream->timing.pix_clk_100hz); 144 + sso_delay_ns = config->sso_num_frames * one_frame_ns; 145 + 146 + return sso_delay_ns; 147 + } 148 + 149 + /** 150 + * dm_ism_get_idle_allow_delay - Calculate hysteresis-based idle allow delay 151 + */ 152 + static uint64_t dm_ism_get_idle_allow_delay(const struct amdgpu_dm_ism *ism, 153 + const struct dc_stream_state *stream) 154 + { 155 + const struct amdgpu_dm_ism_config *config = &ism->config; 156 + uint32_t v_total, h_total; 157 + uint64_t one_frame_ns, short_idle_ns, old_hist_ns; 158 + uint32_t history_size; 159 + int pos; 160 + uint32_t short_idle_count = 0; 161 + uint64_t ret_ns = 0; 162 + 163 + if (!stream) 164 + return 0; 165 + 166 + if (!config->filter_num_frames) 167 + return 0; 168 + if (!config->filter_entry_count) 169 + return 0; 170 + if (!config->activation_num_delay_frames) 171 + return 0; 172 + 173 + v_total = stream->timing.v_total; 174 + h_total = stream->timing.h_total; 175 + 176 + one_frame_ns = div64_u64(v_total * h_total * 10000000ull, 177 + stream->timing.pix_clk_100hz); 178 + 179 + short_idle_ns = config->filter_num_frames * one_frame_ns; 180 + old_hist_ns = config->filter_old_history_threshold * one_frame_ns; 181 + 182 + /* 183 + * Look back into the recent history and count how many times we entered 184 + * idle power state for a short duration of time 185 + */ 186 + history_size = min( 187 + max(config->filter_history_size, config->filter_entry_count), 188 + AMDGPU_DM_IDLE_HIST_LEN); 189 + pos = ism->next_record_idx; 190 + 191 + for (int k = 0; k < history_size; k++) { 192 + if (pos <= 0 || pos > AMDGPU_DM_IDLE_HIST_LEN) 193 + pos = AMDGPU_DM_IDLE_HIST_LEN; 194 + pos -= 1; 195 + 196 + if (ism->records[pos].duration_ns <= short_idle_ns) 197 + short_idle_count += 1; 198 + 199 + if (short_idle_count >= config->filter_entry_count) 200 + break; 201 + 202 + if (old_hist_ns > 0 && 203 + ism->last_idle_timestamp_ns - ism->records[pos].timestamp_ns > old_hist_ns) 204 + break; 
205 + } 206 + 207 + if (short_idle_count >= config->filter_entry_count) 208 + ret_ns = config->activation_num_delay_frames * one_frame_ns; 209 + 210 + return ret_ns; 211 + } 212 + 213 + /** 214 + * dm_ism_insert_record - Insert a record into the circular history buffer 215 + */ 216 + static void dm_ism_insert_record(struct amdgpu_dm_ism *ism) 217 + { 218 + struct amdgpu_dm_ism_record *record; 219 + 220 + if (ism->next_record_idx < 0 || 221 + ism->next_record_idx >= AMDGPU_DM_IDLE_HIST_LEN) 222 + ism->next_record_idx = 0; 223 + 224 + record = &ism->records[ism->next_record_idx]; 225 + ism->next_record_idx += 1; 226 + 227 + record->timestamp_ns = ktime_get_ns(); 228 + record->duration_ns = 229 + record->timestamp_ns - ism->last_idle_timestamp_ns; 230 + } 231 + 232 + 233 + static void dm_ism_set_last_idle_ts(struct amdgpu_dm_ism *ism) 234 + { 235 + ism->last_idle_timestamp_ns = ktime_get_ns(); 236 + } 237 + 238 + 239 + static bool dm_ism_trigger_event(struct amdgpu_dm_ism *ism, 240 + enum amdgpu_dm_ism_event event) 241 + { 242 + enum amdgpu_dm_ism_state next_state; 243 + 244 + bool gotNextState = dm_ism_next_state(ism->current_state, event, 245 + &next_state); 246 + 247 + if (gotNextState) { 248 + ism->previous_state = ism->current_state; 249 + ism->current_state = next_state; 250 + } 251 + 252 + return gotNextState; 253 + } 254 + 255 + 256 + static void dm_ism_commit_idle_optimization_state(struct amdgpu_dm_ism *ism, 257 + struct dc_stream_state *stream, 258 + bool vblank_enabled, 259 + bool allow_panel_sso) 260 + { 261 + struct amdgpu_crtc *acrtc = ism_to_amdgpu_crtc(ism); 262 + struct amdgpu_device *adev = drm_to_adev(acrtc->base.dev); 263 + struct amdgpu_display_manager *dm = &adev->dm; 264 + int r; 265 + 266 + trace_amdgpu_dm_ism_commit(dm->active_vblank_irq_count, 267 + vblank_enabled, 268 + allow_panel_sso); 269 + 270 + /* 271 + * If there is an active vblank requestor, or if SSO is being engaged, 272 + * then disallow idle optimizations. 273 + */ 274 + if (vblank_enabled || allow_panel_sso) 275 + dc_allow_idle_optimizations(dm->dc, false); 276 + 277 + /* 278 + * Control PSR based on vblank requirements from OS 279 + * 280 + * If panel supports PSR SU/Replay, there's no need to exit self-refresh 281 + * when OS is submitting fast atomic commits, as they can allow 282 + * self-refresh during vblank periods. 283 + */ 284 + if (stream && stream->link) { 285 + /* 286 + * If allow_panel_sso is true when disabling vblank, allow 287 + * deeper panel sleep states such as PSR1 and Replay static 288 + * screen optimization. 289 + */ 290 + if (!vblank_enabled && allow_panel_sso) { 291 + amdgpu_dm_crtc_set_panel_sr_feature( 292 + dm, acrtc, stream, false, 293 + acrtc->dm_irq_params.allow_sr_entry); 294 + } else if (vblank_enabled) { 295 + /* Make sure to exit SSO on vblank enable */ 296 + amdgpu_dm_crtc_set_panel_sr_feature( 297 + dm, acrtc, stream, true, 298 + acrtc->dm_irq_params.allow_sr_entry); 299 + } 300 + /* 301 + * Else, vblank_enabled == false and allow_panel_sso == false; 302 + * do nothing here. 303 + */ 304 + } 305 + 306 + /* 307 + * Check for any active drm vblank requestors on other CRTCs 308 + * (dm->active_vblank_irq_count) before allowing HW-wide idle 309 + * optimizations. 310 + * 311 + * There's no need to have a "balanced" check when disallowing idle 312 + * optimizations at the start of this func -- we should disallow 313 + * whenever there's *an* active CRTC. 
314 + */ 315 + if (!vblank_enabled && dm->active_vblank_irq_count == 0) { 316 + dc_post_update_surfaces_to_stream(dm->dc); 317 + 318 + r = amdgpu_dpm_pause_power_profile(adev, true); 319 + if (r) 320 + dev_warn(adev->dev, "failed to set default power profile mode\n"); 321 + 322 + dc_allow_idle_optimizations(dm->dc, true); 323 + 324 + r = amdgpu_dpm_pause_power_profile(adev, false); 325 + if (r) 326 + dev_warn(adev->dev, "failed to restore the power profile mode\n"); 327 + } 328 + } 329 + 330 + 331 + static enum amdgpu_dm_ism_event dm_ism_dispatch_power_state( 332 + struct amdgpu_dm_ism *ism, 333 + struct dm_crtc_state *acrtc_state, 334 + enum amdgpu_dm_ism_event event) 335 + { 336 + enum amdgpu_dm_ism_event ret = event; 337 + const struct amdgpu_dm_ism_config *config = &ism->config; 338 + uint64_t delay_ns, sso_delay_ns; 339 + 340 + switch (ism->previous_state) { 341 + case DM_ISM_STATE_HYSTERESIS_WAITING: 342 + /* 343 + * Stop the timer if it was set, and we're not running from the 344 + * idle allow worker. 345 + */ 346 + if (ism->current_state != DM_ISM_STATE_OPTIMIZED_IDLE && 347 + ism->current_state != DM_ISM_STATE_OPTIMIZED_IDLE_SSO) 348 + cancel_delayed_work(&ism->delayed_work); 349 + break; 350 + case DM_ISM_STATE_OPTIMIZED_IDLE: 351 + if (ism->current_state == DM_ISM_STATE_OPTIMIZED_IDLE_SSO) 352 + break; 353 + /* If idle disallow, cancel SSO work and insert record */ 354 + cancel_delayed_work(&ism->sso_delayed_work); 355 + dm_ism_insert_record(ism); 356 + dm_ism_commit_idle_optimization_state(ism, acrtc_state->stream, 357 + true, false); 358 + break; 359 + case DM_ISM_STATE_OPTIMIZED_IDLE_SSO: 360 + /* Disable idle optimization */ 361 + dm_ism_insert_record(ism); 362 + dm_ism_commit_idle_optimization_state(ism, acrtc_state->stream, 363 + true, false); 364 + break; 365 + default: 366 + break; 367 + } 368 + 369 + switch (ism->current_state) { 370 + case DM_ISM_STATE_HYSTERESIS_WAITING: 371 + dm_ism_set_last_idle_ts(ism); 372 + 373 + /* CRTC can be disabled; allow immediate idle */ 374 + if (!acrtc_state->stream) { 375 + ret = DM_ISM_EVENT_IMMEDIATE; 376 + break; 377 + } 378 + 379 + delay_ns = dm_ism_get_idle_allow_delay(ism, 380 + acrtc_state->stream); 381 + if (delay_ns == 0) { 382 + ret = DM_ISM_EVENT_IMMEDIATE; 383 + break; 384 + } 385 + 386 + /* Schedule worker */ 387 + mod_delayed_work(system_unbound_wq, &ism->delayed_work, 388 + nsecs_to_jiffies(delay_ns)); 389 + 390 + break; 391 + case DM_ISM_STATE_OPTIMIZED_IDLE: 392 + sso_delay_ns = dm_ism_get_sso_delay(ism, acrtc_state->stream); 393 + if (sso_delay_ns == 0) 394 + ret = DM_ISM_EVENT_IMMEDIATE; 395 + else if (config->sso_num_frames < config->filter_num_frames) { 396 + /* 397 + * If sso_num_frames is less than hysteresis frames, it 398 + * indicates that allowing idle here, then disallowing 399 + * idle after sso_num_frames has expired, will likely 400 + * have a negative power impact. Skip idle allow here, 401 + * and let the sso_delayed_work handle it. 402 + */ 403 + mod_delayed_work(system_unbound_wq, 404 + &ism->sso_delayed_work, 405 + nsecs_to_jiffies(sso_delay_ns)); 406 + } else { 407 + /* Enable idle optimization without SSO */ 408 + dm_ism_commit_idle_optimization_state( 409 + ism, acrtc_state->stream, false, false); 410 + mod_delayed_work(system_unbound_wq, 411 + &ism->sso_delayed_work, 412 + nsecs_to_jiffies(sso_delay_ns)); 413 + } 414 + break; 415 + case DM_ISM_STATE_OPTIMIZED_IDLE_SSO: 416 + /* Enable static screen optimizations. 
*/ 417 + dm_ism_commit_idle_optimization_state(ism, acrtc_state->stream, 418 + false, true); 419 + break; 420 + case DM_ISM_STATE_TIMER_ABORTED: 421 + dm_ism_insert_record(ism); 422 + dm_ism_commit_idle_optimization_state(ism, acrtc_state->stream, 423 + true, false); 424 + ret = DM_ISM_EVENT_IMMEDIATE; 425 + break; 426 + default: 427 + break; 428 + } 429 + 430 + return ret; 431 + } 432 + 433 + static char *dm_ism_events_str[DM_ISM_NUM_EVENTS] = { 434 + [DM_ISM_EVENT_IMMEDIATE] = "IMMEDIATE", 435 + [DM_ISM_EVENT_ENTER_IDLE_REQUESTED] = "ENTER_IDLE_REQUESTED", 436 + [DM_ISM_EVENT_EXIT_IDLE_REQUESTED] = "EXIT_IDLE_REQUESTED", 437 + [DM_ISM_EVENT_BEGIN_CURSOR_UPDATE] = "BEGIN_CURSOR_UPDATE", 438 + [DM_ISM_EVENT_END_CURSOR_UPDATE] = "END_CURSOR_UPDATE", 439 + [DM_ISM_EVENT_TIMER_ELAPSED] = "TIMER_ELAPSED", 440 + [DM_ISM_EVENT_SSO_TIMER_ELAPSED] = "SSO_TIMER_ELAPSED", 441 + }; 442 + 443 + static char *dm_ism_states_str[DM_ISM_NUM_STATES] = { 444 + [DM_ISM_STATE_FULL_POWER_RUNNING] = "FULL_POWER_RUNNING", 445 + [DM_ISM_STATE_FULL_POWER_BUSY] = "FULL_POWER_BUSY", 446 + [DM_ISM_STATE_HYSTERESIS_WAITING] = "HYSTERESIS_WAITING", 447 + [DM_ISM_STATE_HYSTERESIS_BUSY] = "HYSTERESIS_BUSY", 448 + [DM_ISM_STATE_OPTIMIZED_IDLE] = "OPTIMIZED_IDLE", 449 + [DM_ISM_STATE_OPTIMIZED_IDLE_SSO] = "OPTIMIZED_IDLE_SSO", 450 + [DM_ISM_STATE_TIMER_ABORTED] = "TIMER_ABORTED", 451 + }; 452 + 453 + 454 + void amdgpu_dm_ism_commit_event(struct amdgpu_dm_ism *ism, 455 + enum amdgpu_dm_ism_event event) 456 + { 457 + enum amdgpu_dm_ism_event next_event = event; 458 + struct amdgpu_crtc *acrtc = ism_to_amdgpu_crtc(ism); 459 + struct amdgpu_device *adev = drm_to_adev(acrtc->base.dev); 460 + struct amdgpu_display_manager *dm = &adev->dm; 461 + struct dm_crtc_state *acrtc_state = to_dm_crtc_state(acrtc->base.state); 462 + 463 + /* ISM transitions must be called with mutex acquired */ 464 + ASSERT(mutex_is_locked(&dm->dc_lock)); 465 + 466 + if (!acrtc_state) { 467 + trace_amdgpu_dm_ism_event(acrtc->crtc_id, "NO_STATE", 468 + "NO_STATE", "N/A"); 469 + return; 470 + } 471 + 472 + do { 473 + bool transition = dm_ism_trigger_event(ism, event); 474 + 475 + next_event = DM_ISM_NUM_EVENTS; 476 + if (transition) { 477 + trace_amdgpu_dm_ism_event( 478 + acrtc->crtc_id, 479 + dm_ism_states_str[ism->previous_state], 480 + dm_ism_states_str[ism->current_state], 481 + dm_ism_events_str[event]); 482 + next_event = dm_ism_dispatch_power_state( 483 + ism, acrtc_state, next_event); 484 + } else { 485 + trace_amdgpu_dm_ism_event( 486 + acrtc->crtc_id, 487 + dm_ism_states_str[ism->current_state], 488 + dm_ism_states_str[ism->current_state], 489 + dm_ism_events_str[event]); 490 + } 491 + 492 + event = next_event; 493 + 494 + } while (next_event < DM_ISM_NUM_EVENTS); 495 + } 496 + 497 + 498 + static void dm_ism_delayed_work_func(struct work_struct *work) 499 + { 500 + struct amdgpu_dm_ism *ism = 501 + container_of(work, struct amdgpu_dm_ism, delayed_work.work); 502 + struct amdgpu_crtc *acrtc = ism_to_amdgpu_crtc(ism); 503 + struct amdgpu_device *adev = drm_to_adev(acrtc->base.dev); 504 + struct amdgpu_display_manager *dm = &adev->dm; 505 + 506 + guard(mutex)(&dm->dc_lock); 507 + 508 + amdgpu_dm_ism_commit_event(ism, DM_ISM_EVENT_TIMER_ELAPSED); 509 + } 510 + 511 + static void dm_ism_sso_delayed_work_func(struct work_struct *work) 512 + { 513 + struct amdgpu_dm_ism *ism = 514 + container_of(work, struct amdgpu_dm_ism, sso_delayed_work.work); 515 + struct amdgpu_crtc *acrtc = ism_to_amdgpu_crtc(ism); 516 + struct amdgpu_device *adev = 
drm_to_adev(acrtc->base.dev); 517 + struct amdgpu_display_manager *dm = &adev->dm; 518 + 519 + guard(mutex)(&dm->dc_lock); 520 + 521 + amdgpu_dm_ism_commit_event(ism, DM_ISM_EVENT_SSO_TIMER_ELAPSED); 522 + } 523 + 524 + /** 525 + * amdgpu_dm_ism_disable - Disable the ISM 526 + * 527 + * @dm: The amdgpu display manager 528 + * 529 + * Disable the idle state manager by disabling any ISM work, canceling pending 530 + * work, and waiting for in-progress work to finish. After disabling, the system 531 + * is left in DM_ISM_STATE_FULL_POWER_RUNNING state. 532 + */ 533 + void amdgpu_dm_ism_disable(struct amdgpu_display_manager *dm) 534 + { 535 + struct drm_crtc *crtc; 536 + struct amdgpu_crtc *acrtc; 537 + struct amdgpu_dm_ism *ism; 538 + 539 + drm_for_each_crtc(crtc, dm->ddev) { 540 + acrtc = to_amdgpu_crtc(crtc); 541 + ism = &acrtc->ism; 542 + 543 + /* Cancel and disable any pending work */ 544 + disable_delayed_work_sync(&ism->delayed_work); 545 + disable_delayed_work_sync(&ism->sso_delayed_work); 546 + 547 + /* 548 + * When disabled, leave in FULL_POWER_RUNNING state. 549 + * EXIT_IDLE will not queue any work 550 + */ 551 + amdgpu_dm_ism_commit_event(ism, 552 + DM_ISM_EVENT_EXIT_IDLE_REQUESTED); 553 + } 554 + } 555 + 556 + /** 557 + * amdgpu_dm_ism_enable - enable the ISM 558 + * 559 + * @dm: The amdgpu display manager 560 + * 561 + * Re-enable the idle state manager by enabling work that was disabled by 562 + * amdgpu_dm_ism_disable. 563 + */ 564 + void amdgpu_dm_ism_enable(struct amdgpu_display_manager *dm) 565 + { 566 + struct drm_crtc *crtc; 567 + struct amdgpu_crtc *acrtc; 568 + struct amdgpu_dm_ism *ism; 569 + 570 + drm_for_each_crtc(crtc, dm->ddev) { 571 + acrtc = to_amdgpu_crtc(crtc); 572 + ism = &acrtc->ism; 573 + 574 + enable_delayed_work(&ism->delayed_work); 575 + enable_delayed_work(&ism->sso_delayed_work); 576 + } 577 + } 578 + 579 + void amdgpu_dm_ism_init(struct amdgpu_dm_ism *ism, 580 + struct amdgpu_dm_ism_config *config) 581 + { 582 + ism->config = *config; 583 + 584 + ism->current_state = DM_ISM_STATE_FULL_POWER_RUNNING; 585 + ism->previous_state = DM_ISM_STATE_FULL_POWER_RUNNING; 586 + ism->next_record_idx = 0; 587 + ism->last_idle_timestamp_ns = 0; 588 + 589 + INIT_DELAYED_WORK(&ism->delayed_work, dm_ism_delayed_work_func); 590 + INIT_DELAYED_WORK(&ism->sso_delayed_work, dm_ism_sso_delayed_work_func); 591 + } 592 + 593 + 594 + void amdgpu_dm_ism_fini(struct amdgpu_dm_ism *ism) 595 + { 596 + cancel_delayed_work_sync(&ism->sso_delayed_work); 597 + cancel_delayed_work_sync(&ism->delayed_work); 598 + }
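dm_ism_next_state() above keys a single switch on a packed (state, event) pair and simply refuses unknown combinations, which is what keeps stray events from corrupting the machine. A self-contained sketch of the same table-in-a-switch technique (state and event names here are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

enum state { S_RUNNING, S_WAITING, S_IDLE };
enum event { E_ENTER_IDLE, E_TIMER, E_EXIT_IDLE };

/* Pack state and event into one switch key, as STATE_EVENT() does */
#define SE(s, e) (((s) << 8) | (e))

static bool next_state(enum state cur, enum event ev, enum state *next)
{
	switch (SE(cur, ev)) {
	case SE(S_RUNNING, E_ENTER_IDLE): *next = S_WAITING; return true;
	case SE(S_WAITING, E_TIMER):      *next = S_IDLE;    return true;
	case SE(S_WAITING, E_EXIT_IDLE):  *next = S_RUNNING; return true;
	case SE(S_IDLE,    E_EXIT_IDLE):  *next = S_RUNNING; return true;
	default:
		return false;	/* invalid transition: ignored, state unchanged */
	}
}

int main(void)
{
	enum state s = S_RUNNING, n;

	if (next_state(s, E_ENTER_IDLE, &n))
		s = n;				/* now S_WAITING */
	if (!next_state(s, E_ENTER_IDLE, &n))	/* no such transition */
		puts("ignored, as dm_ism_next_state() would");
	return 0;
}

amdgpu_dm_ism_commit_event() adds one wrinkle on top of this: the dispatch step may hand back a follow-up event (e.g. DM_ISM_EVENT_IMMEDIATE), and the commit loop keeps feeding events back into the machine until the dispatcher returns none.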
+151
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_ism.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright 2026 Advanced Micro Devices, Inc. 4 + * 5 + * Permission is hereby granted, free of charge, to any person obtaining a 6 + * copy of this software and associated documentation files (the "Software"), 7 + * to deal in the Software without restriction, including without limitation 8 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 + * and/or sell copies of the Software, and to permit persons to whom the 10 + * Software is furnished to do so, subject to the following conditions: 11 + * 12 + * The above copyright notice and this permission notice shall be included in 13 + * all copies or substantial portions of the Software. 14 + * 15 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 + * OTHER DEALINGS IN THE SOFTWARE. 22 + * 23 + * Authors: AMD 24 + * 25 + */ 26 + 27 + #ifndef __AMDGPU_DM_ISM_H__ 28 + #define __AMDGPU_DM_ISM_H__ 29 + 30 + #include <linux/workqueue.h> 31 + 32 + struct amdgpu_crtc; 33 + struct amdgpu_display_manager; 34 + 35 + #define AMDGPU_DM_IDLE_HIST_LEN 16 36 + 37 + enum amdgpu_dm_ism_state { 38 + DM_ISM_STATE_FULL_POWER_RUNNING, 39 + DM_ISM_STATE_FULL_POWER_BUSY, 40 + DM_ISM_STATE_HYSTERESIS_WAITING, 41 + DM_ISM_STATE_HYSTERESIS_BUSY, 42 + DM_ISM_STATE_OPTIMIZED_IDLE, 43 + DM_ISM_STATE_OPTIMIZED_IDLE_SSO, 44 + DM_ISM_STATE_TIMER_ABORTED, 45 + DM_ISM_NUM_STATES, 46 + }; 47 + 48 + enum amdgpu_dm_ism_event { 49 + DM_ISM_EVENT_IMMEDIATE, 50 + DM_ISM_EVENT_ENTER_IDLE_REQUESTED, 51 + DM_ISM_EVENT_EXIT_IDLE_REQUESTED, 52 + DM_ISM_EVENT_BEGIN_CURSOR_UPDATE, 53 + DM_ISM_EVENT_END_CURSOR_UPDATE, 54 + DM_ISM_EVENT_TIMER_ELAPSED, 55 + DM_ISM_EVENT_SSO_TIMER_ELAPSED, 56 + DM_ISM_NUM_EVENTS, 57 + }; 58 + 59 + #define STATE_EVENT(state, event) (((state) << 8) | (event)) 60 + 61 + struct amdgpu_dm_ism_config { 62 + 63 + /** 64 + * @filter_num_frames: Idle periods shorter than this number of frames 65 + * will be considered a "short idle period" for filtering. 66 + * 67 + * 0 indicates no filtering (i.e. no idle allow delay will be applied) 68 + */ 69 + unsigned int filter_num_frames; 70 + 71 + /** 72 + * @filter_history_size: Number of recent idle periods to consider when 73 + * counting the number of short idle periods. 74 + */ 75 + unsigned int filter_history_size; 76 + 77 + /** 78 + * @filter_entry_count: When the number of short idle periods within 79 + * recent &filter_history_size reaches this count, the idle allow delay 80 + * will be applied. 81 + * 82 + * 0 indicates no filtering (i.e. no idle allow delay will be applied) 83 + */ 84 + unsigned int filter_entry_count; 85 + 86 + /** 87 + * @activation_num_delay_frames: Defines the number of frames to wait 88 + * for the idle allow delay. 89 + * 90 + * 0 indicates no filtering (i.e. no idle allow delay will be applied) 91 + */ 92 + unsigned int activation_num_delay_frames; 93 + 94 + /** 95 + * @filter_old_history_threshold: A time-based restriction on top of 96 + * &filter_history_size. 
Idle periods older than this threshold (in 97 + * number of frames) will be ignored when counting the number of short 98 + * idle periods. 99 + * 100 + * 0 indicates no time-based restriction, i.e. history is limited only 101 + * by &filter_history_size. 102 + */ 103 + unsigned int filter_old_history_threshold; 104 + 105 + /** 106 + * @sso_num_frames: Number of frames to delay before enabling static 107 + * screen optimizations, such as PSR1 and Replay low HZ idle mode. 108 + * 109 + * 0 indicates immediate SSO enable upon allowing idle. 110 + */ 111 + unsigned int sso_num_frames; 112 + }; 113 + 114 + struct amdgpu_dm_ism_record { 115 + /** 116 + * @timestamp_ns: When idle was allowed 117 + */ 118 + unsigned long long timestamp_ns; 119 + 120 + /** 121 + * @duration_ns: How long idle was allowed 122 + */ 123 + unsigned long long duration_ns; 124 + }; 125 + 126 + struct amdgpu_dm_ism { 127 + struct amdgpu_dm_ism_config config; 128 + unsigned long long last_idle_timestamp_ns; 129 + 130 + enum amdgpu_dm_ism_state current_state; 131 + enum amdgpu_dm_ism_state previous_state; 132 + 133 + struct amdgpu_dm_ism_record records[AMDGPU_DM_IDLE_HIST_LEN]; 134 + int next_record_idx; 135 + 136 + struct delayed_work delayed_work; 137 + struct delayed_work sso_delayed_work; 138 + }; 139 + 140 + #define ism_to_amdgpu_crtc(ism_ptr) \ 141 + container_of(ism_ptr, struct amdgpu_crtc, ism) 142 + 143 + void amdgpu_dm_ism_init(struct amdgpu_dm_ism *ism, 144 + struct amdgpu_dm_ism_config *config); 145 + void amdgpu_dm_ism_fini(struct amdgpu_dm_ism *ism); 146 + void amdgpu_dm_ism_commit_event(struct amdgpu_dm_ism *ism, 147 + enum amdgpu_dm_ism_event event); 148 + void amdgpu_dm_ism_disable(struct amdgpu_display_manager *dm); 149 + void amdgpu_dm_ism_enable(struct amdgpu_display_manager *dm); 150 + 151 + #endif
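The records ring buffer above is what dm_ism_get_idle_allow_delay() walks to decide whether recent idle periods were too short. A compact userspace restatement of that filter, simplified from the patch (the driver's pos bookkeeping is equivalent to the backwards walk below):

#include <stdbool.h>
#include <stdint.h>

#define HIST_LEN 16	/* mirrors AMDGPU_DM_IDLE_HIST_LEN */

struct record { uint64_t timestamp_ns, duration_ns; };

/* True when enough short idle periods were seen in recent history. */
static bool needs_idle_delay(const struct record *rec, int next_idx,
			     uint64_t now_ns, unsigned int history_size,
			     uint64_t short_idle_ns, uint64_t old_hist_ns,
			     unsigned int entry_count)
{
	unsigned int short_count = 0;
	int pos = next_idx;

	for (unsigned int k = 0; k < history_size && k < HIST_LEN; k++) {
		pos = (pos <= 0) ? HIST_LEN - 1 : pos - 1;	/* newest first */

		if (rec[pos].duration_ns <= short_idle_ns)
			short_count++;
		if (short_count >= entry_count)
			return true;
		/* 0 disables the age cutoff, as in the driver */
		if (old_hist_ns && now_ns - rec[pos].timestamp_ns > old_hist_ns)
			break;
	}
	return false;
}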
+18 -4
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
··· 954 954 return r; 955 955 } 956 956 957 - r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1); 958 - if (r) { 959 - drm_err(adev_to_drm(adev), "reserving fence slot failed (%d)\n", r); 957 + r = dma_resv_reserve_fences(rbo->tbo.base.resv, TTM_NUM_MOVE_FENCES); 958 + if (r) 960 959 goto error_unlock; 961 - } 962 960 963 961 if (plane->type != DRM_PLANE_TYPE_CURSOR) 964 962 domain = amdgpu_display_supported_domains(adev, rbo->flags); ··· 1372 1374 /* turn off cursor */ 1373 1375 if (crtc_state && crtc_state->stream) { 1374 1376 mutex_lock(&adev->dm.dc_lock); 1377 + amdgpu_dm_ism_commit_event( 1378 + &amdgpu_crtc->ism, 1379 + DM_ISM_EVENT_BEGIN_CURSOR_UPDATE); 1380 + 1375 1381 dc_stream_program_cursor_position(crtc_state->stream, 1376 1382 &position); 1383 + 1384 + amdgpu_dm_ism_commit_event( 1385 + &amdgpu_crtc->ism, 1386 + DM_ISM_EVENT_END_CURSOR_UPDATE); 1377 1387 mutex_unlock(&adev->dm.dc_lock); 1378 1388 } 1379 1389 return; ··· 1411 1405 1412 1406 if (crtc_state->stream) { 1413 1407 mutex_lock(&adev->dm.dc_lock); 1408 + amdgpu_dm_ism_commit_event( 1409 + &amdgpu_crtc->ism, 1410 + DM_ISM_EVENT_BEGIN_CURSOR_UPDATE); 1411 + 1414 1412 if (!dc_stream_program_cursor_attributes(crtc_state->stream, 1415 1413 &attributes)) 1416 1414 DRM_ERROR("DC failed to set cursor attributes\n"); ··· 1422 1412 if (!dc_stream_program_cursor_position(crtc_state->stream, 1423 1413 &position)) 1424 1414 DRM_ERROR("DC failed to set cursor position\n"); 1415 + 1416 + amdgpu_dm_ism_commit_event( 1417 + &amdgpu_crtc->ism, 1418 + DM_ISM_EVENT_END_CURSOR_UPDATE); 1425 1419 mutex_unlock(&adev->dm.dc_lock); 1426 1420 } 1427 1421 }
+63
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
··· 753 753 ) 754 754 ); 755 755 756 + TRACE_EVENT(amdgpu_dm_ism_commit, 757 + TP_PROTO( 758 + int active_vblank_irq_count, 759 + bool vblank_enabled, 760 + bool allow_panel_sso 761 + ), 762 + TP_ARGS( 763 + active_vblank_irq_count, 764 + vblank_enabled, 765 + allow_panel_sso 766 + ), 767 + TP_STRUCT__entry( 768 + __field(int, active_vblank_irq_count) 769 + __field(bool, vblank_enabled) 770 + __field(bool, allow_panel_sso) 771 + ), 772 + TP_fast_assign( 773 + __entry->active_vblank_irq_count = active_vblank_irq_count; 774 + __entry->vblank_enabled = vblank_enabled; 775 + __entry->allow_panel_sso = allow_panel_sso; 776 + ), 777 + TP_printk( 778 + "active_vblank_irq_count=%d vblank_enabled=%d allow_panel_sso=%d", 779 + __entry->active_vblank_irq_count, 780 + __entry->vblank_enabled, 781 + __entry->allow_panel_sso 782 + ) 783 + ); 784 + 785 + TRACE_EVENT(amdgpu_dm_ism_event, 786 + TP_PROTO( 787 + int crtc_id, 788 + const char *prev_state, 789 + const char *curr_state, 790 + const char *event 791 + ), 792 + TP_ARGS( 793 + crtc_id, 794 + prev_state, 795 + curr_state, 796 + event 797 + ), 798 + TP_STRUCT__entry( 799 + __field(int, crtc_id) 800 + __string(prev_state, prev_state) 801 + __string(curr_state, curr_state) 802 + __string(event, event) 803 + ), 804 + TP_fast_assign( 805 + __entry->crtc_id = crtc_id; 806 + __assign_str(prev_state); 807 + __assign_str(curr_state); 808 + __assign_str(event); 809 + ), 810 + TP_printk( 811 + "[CRTC %d] %s -> %s on event %s", 812 + __entry->crtc_id, 813 + __get_str(prev_state), 814 + __get_str(curr_state), 815 + __get_str(event)) 816 + ); 817 + 818 + 756 819 #endif /* _AMDGPU_DM_TRACE_H_ */ 757 820 758 821 #undef TRACE_INCLUDE_PATH
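Once this header is compiled in, the new events are visible through the ordinary trace interface. Assuming the header's TRACE_SYSTEM is amdgpu_dm (declared outside this hunk), enabling /sys/kernel/tracing/events/amdgpu_dm/amdgpu_dm_ism_event/enable would log every FSM transition in the TP_printk format above, e.g. "[CRTC 0] HYSTERESIS_WAITING -> OPTIMIZED_IDLE on event TIMER_ELAPSED", which is enough to reconstruct the machine's behaviour without a debugger.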
+2 -4
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
··· 106 106 return r; 107 107 } 108 108 109 - r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1); 110 - if (r) { 111 - drm_err(adev_to_drm(adev), "reserving fence slot failed (%d)\n", r); 109 + r = dma_resv_reserve_fences(rbo->tbo.base.resv, TTM_NUM_MOVE_FENCES); 110 + if (r) 112 111 goto error_unlock; 113 - } 114 112 115 113 domain = amdgpu_display_supported_domains(adev, rbo->flags); 116 114
+22 -3
drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
··· 53 53 { 54 54 int depth; 55 55 56 - depth = __this_cpu_read(fpu_recursion_depth); 56 + depth = this_cpu_read(fpu_recursion_depth); 57 57 58 58 ASSERT(depth >= 1); 59 + } 60 + 61 + /** 62 + * dc_is_fp_enabled - Check if FPU protection is enabled 63 + * 64 + * This function tells if the code is already under FPU protection or not. A 65 + * function that works as an API for a set of FPU operations can use this 66 + * function for checking if the caller invoked it after DC_FP_START(). For 67 + * example, take a look at dcn20_fpu.c file. 68 + * 69 + * Similar to dc_assert_fp_enabled, but does not assert; it returns the status instead. 70 + */ 71 + inline bool dc_is_fp_enabled(void) 72 + { 73 + int depth; 74 + 75 + depth = this_cpu_read(fpu_recursion_depth); 76 + 77 + return (depth >= 1); 59 78 } 60 79 61 80 /** ··· 96 77 97 78 WARN_ON_ONCE(!in_task()); 98 79 preempt_disable(); 99 - depth = __this_cpu_inc_return(fpu_recursion_depth); 80 + depth = this_cpu_inc_return(fpu_recursion_depth); 100 81 if (depth == 1) { 101 82 BUG_ON(!kernel_fpu_available()); 102 83 kernel_fpu_begin(); ··· 119 100 { 120 101 int depth; 121 102 122 - depth = __this_cpu_dec_return(fpu_recursion_depth); 103 + depth = this_cpu_dec_return(fpu_recursion_depth); 123 104 if (depth == 0) { 124 105 kernel_fpu_end(); 125 106 } else {
+16 -1
drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.h
··· 28 28 #define __DC_FPU_H__ 29 29 30 30 void dc_assert_fp_enabled(void); 31 + bool dc_is_fp_enabled(void); 31 32 void dc_fpu_begin(const char *function_name, const int line); 32 33 void dc_fpu_end(const char *function_name, const int line); 33 34 34 35 #ifndef _LINUX_FPU_COMPILATION_UNIT 35 36 #define DC_FP_START() dc_fpu_begin(__func__, __LINE__) 36 37 #define DC_FP_END() dc_fpu_end(__func__, __LINE__) 38 + #ifdef CONFIG_DRM_AMD_DC_FP 39 + #define DC_RUN_WITH_PREEMPTION_ENABLED(code) \ 40 + do { \ 41 + bool dc_fp_enabled = dc_is_fp_enabled(); \ 42 + if (dc_fp_enabled) \ 43 + DC_FP_END(); \ 44 + code; \ 45 + if (dc_fp_enabled) \ 46 + DC_FP_START(); \ 47 + } while (0) 48 + #else 49 + #define DC_RUN_WITH_PREEMPTION_ENABLED(code) code 50 + #endif // !CONFIG_DRM_AMD_DC_FP 37 51 #else 38 52 #define DC_FP_START() BUILD_BUG() 39 53 #define DC_FP_END() BUILD_BUG() 40 - #endif 54 + #define DC_RUN_WITH_PREEMPTION_ENABLED(code) code 55 + #endif // !_LINUX_FPU_COMPILATION_UNIT 41 56 42 57 #endif /* __DC_FPU_H__ */
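DC_RUN_WITH_PREEMPTION_ENABLED() exists because DC_FP_START() disables preemption for the kernel-FPU region, and sleeping inside such a region is illegal. The macro drops FP protection only if it was actually active, runs the wrapped code, then restores protection. A hedged sketch of the intended use; the callees here are hypothetical stand-ins, not driver functions:

static void compute_with_fpu(void);		/* hypothetical FP math */
static void send_blocking_message(void);	/* hypothetical call that may sleep */

static void example_fpu_sequence(void)
{
	DC_FP_START();	/* kernel FPU usable, preemption off */

	compute_with_fpu();

	/*
	 * The blocking call may sleep, so preemption must be re-enabled
	 * around it; the dc_is_fp_enabled() check inside the macro makes
	 * the wrapper safe whether or not the caller held FP protection.
	 */
	DC_RUN_WITH_PREEMPTION_ENABLED(send_blocking_message());

	compute_with_fpu();

	DC_FP_END();
}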
+4 -4
drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
··· 2010 2010 } 2011 2011 /*output link bit per pixel supported*/ 2012 2012 for (k = 0; k <= maximum_number_of_surfaces - 1; k++) { 2013 - data->output_bpphdmi[k] = bw_def_na; 2014 - data->output_bppdp4_lane_hbr[k] = bw_def_na; 2015 - data->output_bppdp4_lane_hbr2[k] = bw_def_na; 2016 - data->output_bppdp4_lane_hbr3[k] = bw_def_na; 2013 + data->output_bpphdmi[k] = (uint32_t)bw_def_na; 2014 + data->output_bppdp4_lane_hbr[k] = (uint32_t)bw_def_na; 2015 + data->output_bppdp4_lane_hbr2[k] = (uint32_t)bw_def_na; 2016 + data->output_bppdp4_lane_hbr3[k] = (uint32_t)bw_def_na; 2017 2017 if (data->enable[k]) { 2018 2018 data->output_bpphdmi[k] = bw_fixed_to_int(bw_mul(bw_div(bw_min2(bw_int_to_fixed(600), data->max_phyclk), data->pixel_rate[k]), bw_int_to_fixed(24))); 2019 2019 if (bw_meq(data->max_phyclk, bw_int_to_fixed(270))) {
+1
drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
··· 503 503 unsigned int integer_bits, 504 504 unsigned int fractional_bits) 505 505 { 506 + (void)integer_bits; 506 507 struct fixed31_32 fixpt_value = dc_fixpt_from_int(int_value); 507 508 508 509 fixpt_value.value |= (long long)frac_value << (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits);
+1
drivers/gpu/drm/amd/display/dc/basics/vector.c
··· 56 56 void *initial_value, 57 57 uint32_t struct_size) 58 58 { 59 + (void)ctx; 59 60 uint32_t i; 60 61 61 62 vector->container = NULL;
+2 -1
drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
··· 1963 1963 count = (le16_to_cpu(header->sHeader.usStructureSize) 1964 1964 - sizeof(ATOM_COMMON_TABLE_HEADER)) 1965 1965 / sizeof(ATOM_GPIO_I2C_ASSIGMENT); 1966 - if (count < record->sucI2cId.bfI2C_LineMux) 1966 + if (count <= record->sucI2cId.bfI2C_LineMux) 1967 1967 return BP_RESULT_BADBIOSTABLE; 1968 1968 1969 1969 /* get the GPIO_I2C_INFO */ ··· 2696 2696 struct slot_layout_info *slot_layout_info, 2697 2697 unsigned int record_offset) 2698 2698 { 2699 + (void)i; 2699 2700 unsigned int j; 2700 2701 struct bios_parser *bp; 2701 2702 ATOM_BRACKET_LAYOUT_RECORD *record;
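The comparison change above is the classic bounds off-by-one: the table holds count entries, so valid indices run 0 through count - 1, and a bfI2C_LineMux equal to count is out of range just as surely as one beyond it and must also return BP_RESULT_BADBIOSTABLE.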
+9
drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
··· 205 205 struct graphics_object_id object_id, uint32_t index, 206 206 struct graphics_object_id *src_object_id) 207 207 { 208 + (void)index; 208 209 struct bios_parser *bp = BP_FROM_DCB(dcb); 209 210 unsigned int i; 210 211 enum bp_result bp_result = BP_RESULT_BADINPUT; ··· 766 765 uint32_t device_tag_index, 767 766 struct connector_device_tag_info *info) 768 767 { 768 + (void)device_tag_index; 769 769 struct bios_parser *bp = BP_FROM_DCB(dcb); 770 770 struct atom_display_object_path_v2 *object; 771 771 ··· 811 809 uint32_t index, 812 810 struct spread_spectrum_info *ss_info) 813 811 { 812 + (void)index; 814 813 enum bp_result result = BP_RESULT_OK; 815 814 struct atom_display_controller_info_v4_1 *disp_cntl_tbl = NULL; 816 815 struct atom_smu_info_v3_3 *smu_info = NULL; ··· 900 897 uint32_t index, 901 898 struct spread_spectrum_info *ss_info) 902 899 { 900 + (void)index; 903 901 enum bp_result result = BP_RESULT_OK; 904 902 struct atom_display_controller_info_v4_2 *disp_cntl_tbl = NULL; 905 903 struct atom_smu_info_v3_1 *smu_info = NULL; ··· 981 977 uint32_t index, 982 978 struct spread_spectrum_info *ss_info) 983 979 { 980 + (void)index; 984 981 enum bp_result result = BP_RESULT_OK; 985 982 struct atom_display_controller_info_v4_5 *disp_cntl_tbl = NULL; 986 983 ··· 1609 1604 struct dc_bios *dcb, 1610 1605 enum as_signal_type signal) 1611 1606 { 1607 + (void)dcb; 1608 + (void)signal; 1612 1609 /* TODO: DAL2 atomfirmware implementation does not need this. 1613 1610 * why DAL3 need this? 1614 1611 */ ··· 3543 3536 struct dc_bios *dcb, 3544 3537 void *dst) 3545 3538 { 3539 + (void)dcb; 3540 + (void)dst; 3546 3541 // TODO: There is data bytes alignment issue, disable it for now. 3547 3542 return 0; 3548 3543 }
+2
drivers/gpu/drm/amd/display/dc/bios/command_table2.c
··· 783 783 struct bios_parser *bp, 784 784 struct bp_external_encoder_control *cntl) 785 785 { 786 + (void)bp; 787 + (void)cntl; 786 788 /* TODO */ 787 789 return BP_RESULT_OK; 788 790 }
+1
drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c
··· 94 94 95 95 static uint8_t dig_encoder_sel_to_atom(enum engine_id id) 96 96 { 97 + (void)id; 97 98 /* On any ASIC after DCE80, we manually program the DIG_FE 98 99 * selection (see connect_dig_be_to_fe function of the link 99 100 * encoder), so translation should always return 0 (no FE).
+1
drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c
··· 93 93 94 94 static uint8_t dig_encoder_sel_to_atom(enum engine_id id) 95 95 { 96 + (void)id; 96 97 /* On any ASIC after DCE80, we manually program the DIG_FE 97 98 * selection (see connect_dig_be_to_fe function of the link 98 99 * encoder), so translation should always return 0 (no FE).
+1
drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c
··· 91 91 92 92 static uint8_t dig_encoder_sel_to_atom(enum engine_id id) 93 93 { 94 + (void)id; 94 95 /* On any ASIC after DCE80, we manually program the DIG_FE 95 96 * selection (see connect_dig_be_to_fe function of the link 96 97 * encoder), so translation should always return 0 (no FE).
+3 -2
drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
··· 78 78 struct dc *dc, 79 79 struct dc_state *context) 80 80 { 81 + (void)dc; 81 82 int i, total_plane_count; 82 83 83 84 total_plane_count = 0; ··· 98 97 { 99 98 struct dc_link *edp_links[MAX_NUM_EDP]; 100 99 struct dc_link *edp_link = NULL; 101 - int edp_num; 100 + unsigned int edp_num; 102 101 unsigned int panel_inst; 103 102 104 103 dc_get_edp_links(dc, edp_links, &edp_num); ··· 124 123 { 125 124 struct dc_link *edp_links[MAX_NUM_EDP]; 126 125 struct dc_link *edp_link = NULL; 127 - int edp_num; 126 + unsigned int edp_num; 128 127 unsigned int panel_inst; 129 128 130 129 dc_get_edp_links(dc, edp_links, &edp_num);
+1 -1
drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
··· 92 92 uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context) 93 93 { 94 94 uint8_t j; 95 - uint32_t min_vertical_blank_time = -1; 95 + uint32_t min_vertical_blank_time = (uint32_t)-1; 96 96 97 97 for (j = 0; j < context->stream_count; j++) { 98 98 struct dc_stream_state *stream = context->streams[j];
+2 -1
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
··· 740 740 if (clk_mgr->base.dentist_vco_freq_khz == 0) 741 741 clk_mgr->base.dentist_vco_freq_khz = 3600000; 742 742 743 - if (ctx->dc_bios->integrated_info->memory_type == LpDdr4MemType) { 743 + if (ctx->dc_bios->integrated_info && 744 + ctx->dc_bios->integrated_info->memory_type == LpDdr4MemType) { 744 745 if (clk_mgr->periodic_retraining_disabled) { 745 746 rn_bw_params.wm_table = lpddr4_wm_table_with_disabled_ppt; 746 747 } else {
+1 -2
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
··· 421 421 clk_mgr_base->bw_params->dc_mode_softmax_memclk = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK); 422 422 423 423 /* Refresh bounding box */ 424 - DC_FP_START(); 425 424 clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box( 426 425 clk_mgr->base.ctx->dc, clk_mgr_base->bw_params); 427 - DC_FP_END(); 428 426 } 429 427 430 428 static bool dcn3_is_smu_present(struct clk_mgr *clk_mgr_base) ··· 521 523 struct pp_smu_funcs *pp_smu, 522 524 struct dccg *dccg) 523 525 { 526 + (void)pp_smu; 524 527 struct clk_state_registers_and_bypass s = { 0 }; 525 528 526 529 clk_mgr->base.ctx = ctx;
+4 -3
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
··· 733 733 if (clk_mgr->base.base.dentist_vco_freq_khz == 0) 734 734 clk_mgr->base.base.dentist_vco_freq_khz = 3600000; 735 735 736 - if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { 736 + if (ctx->dc_bios->integrated_info && 737 + ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) 737 738 vg_bw_params.wm_table = lpddr5_wm_table; 738 - } else { 739 + else 739 740 vg_bw_params.wm_table = ddr4_wm_table; 740 - } 741 + 741 742 /* Saved clocks configured at boot for debug purposes */ 742 743 vg_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info); 743 744
+7 -3
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
··· 329 329 static void dcn31_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, 330 330 struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info) 331 331 { 332 + (void)regs_and_bypass; 333 + (void)clk_mgr_base; 334 + (void)log_info; 332 335 return; 333 336 } 334 337 ··· 728 725 /* TODO: Check we get what we expect during bringup */ 729 726 clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); 730 727 731 - if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { 728 + if (ctx->dc_bios->integrated_info && 729 + ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) 732 730 dcn31_bw_params.wm_table = lpddr5_wm_table; 733 - } else { 731 + else 734 732 dcn31_bw_params.wm_table = ddr5_wm_table; 735 - } 733 + 736 734 /* Saved clocks configured at boot for debug purposes */ 737 735 dcn31_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, 738 736 &clk_mgr->base.base, &log_info);
+5 -1
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
··· 395 395 static void dcn314_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, 396 396 struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info) 397 397 { 398 + (void)regs_and_bypass; 399 + (void)clk_mgr_base; 400 + (void)log_info; 398 401 return; 399 402 } 400 403 ··· 845 842 /* TODO: Check we get what we expect during bringup */ 846 843 clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); 847 844 848 - if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) 845 + if (ctx->dc_bios->integrated_info && 846 + ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) 849 847 dcn314_bw_params.wm_table = lpddr5_wm_table; 850 848 else 851 849 dcn314_bw_params.wm_table = ddr5_wm_table;
+7 -3
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
··· 247 247 static void dcn315_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, 248 248 struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info) 249 249 { 250 + (void)regs_and_bypass; 251 + (void)clk_mgr_base; 252 + (void)log_info; 250 253 return; 251 254 } 252 255 ··· 655 652 if (clk_mgr->base.smu_ver > 0) 656 653 clk_mgr->base.smu_present = true; 657 654 658 - if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { 655 + if (ctx->dc_bios->integrated_info && 656 + ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) 659 657 dcn315_bw_params.wm_table = lpddr5_wm_table; 660 - } else { 658 + else 661 659 dcn315_bw_params.wm_table = ddr5_wm_table; 662 - } 660 + 663 661 /* Saved clocks configured at boot for debug purposes */ 664 662 dcn315_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, 665 663 &clk_mgr->base.base, &log_info);
+7 -3
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
··· 255 255 static void dcn316_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, 256 256 struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info) 257 257 { 258 + (void)regs_and_bypass; 259 + (void)clk_mgr_base; 260 + (void)log_info; 258 261 return; 259 262 } 260 263 ··· 639 636 clk_mgr->base.base.dentist_vco_freq_khz = 2500000; /* 2400MHz */ 640 637 641 638 642 - if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { 639 + if (ctx->dc_bios->integrated_info && 640 + ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) 643 641 dcn316_bw_params.wm_table = lpddr5_wm_table; 644 - } else { 642 + else 645 643 dcn316_bw_params.wm_table = ddr4_wm_table; 646 - } 644 + 647 645 /* Saved clocks configured at boot for debug purposes */ 648 646 dcn316_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, 649 647 &clk_mgr->base.base, &log_info);
+2 -2
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
··· 872 872 static void dcn32_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, 873 873 struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info) 874 874 { 875 + (void)log_info; 875 876 struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); 876 877 uint32_t dprefclk_did = 0; 877 878 uint32_t dcfclk_did = 0; ··· 1060 1059 if (!clk_mgr->dpm_present) 1061 1060 dcn32_patch_dpm_table(clk_mgr_base->bw_params); 1062 1061 1063 - DC_FP_START(); 1064 1062 /* Refresh bounding box */ 1065 1063 clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box( 1066 1064 clk_mgr->base.ctx->dc, clk_mgr_base->bw_params); 1067 - DC_FP_END(); 1068 1065 } 1069 1066 1070 1067 static bool dcn32_are_clock_states_equal(struct dc_clocks *a, ··· 1146 1147 struct pp_smu_funcs *pp_smu, 1147 1148 struct dccg *dccg) 1148 1149 { 1150 + (void)pp_smu; 1149 1151 struct clk_log_info log_info = {0}; 1150 1152 1151 1153 clk_mgr->base.ctx = ctx;
+4 -3
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
··· 1464 1464 /* TODO: Check we get what we expect during bringup */ 1465 1465 clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base); 1466 1466 1467 - if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) { 1467 + if (ctx->dc_bios->integrated_info && 1468 + ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) 1468 1469 dcn35_bw_params.wm_table = lpddr5_wm_table; 1469 - } else { 1470 + else 1470 1471 dcn35_bw_params.wm_table = ddr5_wm_table; 1471 - } 1472 + 1472 1473 /* Saved clocks configured at boot for debug purposes */ 1473 1474 dcn35_save_clk_registers(&clk_mgr->base.base.boot_snapshot, clk_mgr); 1474 1475
+3
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
··· 333 333 static void dcn401_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, 334 334 struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info) 335 335 { 336 + (void)log_info; 336 337 struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); 337 338 uint32_t dprefclk_did = 0; 338 339 uint32_t dcfclk_did = 0; ··· 526 525 struct dc_state *context, 527 526 int ref_dtbclk_khz) 528 527 { 528 + (void)ref_dtbclk_khz; 529 529 int i; 530 530 struct dccg *dccg = clk_mgr->dccg; 531 531 struct pipe_ctx *otg_master; ··· 616 614 struct clk_mgr_internal *clk_mgr, 617 615 struct dc_state *context) 618 616 { 617 + (void)context; 619 618 uint32_t new_disp_divider = 0; 620 619 uint32_t new_dispclk_wdivider = 0; 621 620 uint32_t dentist_dispclk_wdivider_readback = 0;
+19 -12
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.c
··· 43 43 #define DC_LOGGER_INIT(logger) \ 44 44 struct dal_logger *dc_logger = logger 45 45 46 - #define DCN42_CLKIP_REFCLK 48000 47 - 48 46 #undef FN 49 47 #define FN(reg_name, field_name) \ 50 48 clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name ··· 158 160 struct dc_state *context, 159 161 int ref_dtbclk_khz) 160 162 { 163 + (void)clk_mgr; 164 + (void)context; 165 + (void)ref_dtbclk_khz; 161 166 /* DCN42 does not implement set_dtbclk_dto function, so this is a no-op */ 162 167 } 163 168 ··· 256 255 dcn42_smu_set_zstate_support(clk_mgr, DCN_ZSTATE_SUPPORT_DISALLOW); 257 256 clk_mgr_base->clks.zstate_support = new_clocks->zstate_support; 258 257 } 258 + /* Only attempt to enable dtbclk if currently disabled AND new state requests it. 259 + * For dcn42b (no dtbclk hardware), init_clk_states sets dtbclk_en=false and 260 + * new_clocks->dtbclk_en should always be false, so this block never executes. 261 + */ 259 262 if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) { 260 263 int actual_dtbclk = 0; 261 264 ··· 331 326 } 332 327 333 328 /* clock limits are received with MHz precision, divide by 1000 to prevent setting clocks at every call */ 334 - if (!dc->debug.disable_dtb_ref_clk_switch && 329 + if (!dc->debug.disable_dtb_ref_clk_switch && new_clocks->dtbclk_en && 335 330 should_set_clock(safe_to_lower, new_clocks->ref_dtbclk_khz / 1000, 336 331 clk_mgr_base->clks.ref_dtbclk_khz / 1000)) { 337 332 dcn42_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz); ··· 524 519 clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN; 525 520 } 526 521 527 - static void dcn42_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr, 522 + void dcn42_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr, 528 523 struct dcn42_smu_dpm_clks *smu_dpm_clks) 529 524 { 530 525 DpmClocks_t_dcn42 *table = smu_dpm_clks->dpm_clks; ··· 838 833 839 834 void dcn42_exit_low_power_state(struct clk_mgr *clk_mgr_base) 840 835 { 836 + (void)clk_mgr_base; 841 837 842 838 } 843 839 ··· 848 842 849 843 } 850 844 851 - static void dcn42_update_clocks_fpga(struct clk_mgr *clk_mgr, 845 + void dcn42_update_clocks_fpga(struct clk_mgr *clk_mgr, 852 846 struct dc_state *context, 853 847 bool safe_to_lower) 854 848 { ··· 901 895 // Both fclk and ref_dppclk run on the same scemi clock. 
902 896 clk_mgr_int->dccg->ref_dppclk = clk_mgr->clks.fclk_khz; 903 897 904 - /* TODO: set dtbclk in correct place */ 905 - clk_mgr->clks.dtbclk_en = true; 906 - 907 898 dm_set_dcn_clocks(clk_mgr->ctx, &clk_mgr->clks); 899 + if (clk_mgr->clks.dtbclk_en) { 900 + dcn42_update_clocks_update_dtb_dto(clk_mgr_int, context, clk_mgr->clks.ref_dtbclk_khz); 901 + } else { 902 + clk_mgr->clks.ref_dtbclk_khz = 0; 903 + } 908 904 dcn42_update_clocks_update_dpp_dto(clk_mgr_int, context, safe_to_lower); 909 - 910 - dcn42_update_clocks_update_dtb_dto(clk_mgr_int, context, clk_mgr->clks.ref_dtbclk_khz); 911 905 } 912 906 913 907 unsigned int dcn42_get_max_clock_khz(struct clk_mgr *clk_mgr_base, enum clk_type clk_type) ··· 939 933 return 0; 940 934 } 941 935 942 - static int dcn42_get_dispclk_from_dentist(struct clk_mgr *clk_mgr_base) 936 + int dcn42_get_dispclk_from_dentist(struct clk_mgr *clk_mgr_base) 943 937 { 938 + (void)clk_mgr_base; 944 939 struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); 945 940 uint32_t dispclk_wdivider; 946 941 int disp_divider; ··· 961 954 return clk_mgr->smu_present; 962 955 } 963 956 964 - static void dcn42_get_smu_clocks(struct clk_mgr_internal *clk_mgr_int) 957 + void dcn42_get_smu_clocks(struct clk_mgr_internal *clk_mgr_int) 965 958 { 966 959 struct clk_mgr *clk_mgr_base = &clk_mgr_int->base; 967 960 struct dcn42_smu_dpm_clks smu_dpm_clks = { 0 };
+8 -1
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.h
··· 27 27 #include "clk_mgr_internal.h"
28 28
29 29 #define NUM_CLOCK_SOURCES 5
30 + #define DCN42_CLKIP_REFCLK 48000
30 31
31 32 struct dcn42_watermarks;
32 33
···
72 71 void dcn42_exit_low_power_state(struct clk_mgr *clk_mgr_base);
73 72 unsigned int dcn42_get_max_clock_khz(struct clk_mgr *clk_mgr_base, enum clk_type clk_type);
74 73 bool dcn42_is_smu_present(struct clk_mgr *clk_mgr_base);
74 + bool dcn42_has_active_display(struct dc *dc, const struct dc_state *context);
75 75 int dcn42_get_active_display_cnt_wa(struct dc *dc, struct dc_state *context, int *all_active_disps);
76 76 void dcn42_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr, struct dc_state *context, bool safe_to_lower);
77 77 void dcn42_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr, struct dc_state *context, int ref_dtbclk_khz);
78 78 bool dcn42_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base);
79 - bool dcn42_has_active_display(struct dc *dc, const struct dc_state *context);
79 + struct dcn42_smu_dpm_clks; /* Forward declaration for pointer parameter below */
80 + void dcn42_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr, struct dcn42_smu_dpm_clks *smu_dpm_clks);
81 + void dcn42_get_smu_clocks(struct clk_mgr_internal *clk_mgr_int);
82 + void dcn42_update_clocks_fpga(struct clk_mgr *clk_mgr, struct dc_state *context, bool safe_to_lower);
83 + int dcn42_get_dispclk_from_dentist(struct clk_mgr *clk_mgr_base);
80 84 #endif //__DCN42_CLK_MGR_H__
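On the new `struct dcn42_smu_dpm_clks;` line in the header: a bare forward declaration is sufficient because the type only crosses this interface as a pointer, and C requires the complete definition only where the object is dereferenced or sized. A self-contained illustration with hypothetical names, not DC code:

/* An incomplete type is enough to declare pointer parameters. */
struct opaque_clks;                     /* incomplete here */

void consume(struct opaque_clks *clks); /* ok: pointer to incomplete type */

struct opaque_clks {                    /* completed before any dereference */
	int dcfclk_mhz;
};

void consume(struct opaque_clks *clks)
{
	clks->dcfclk_mhz = 0;           /* dereference needs the full type */
}

int main(void)
{
	struct opaque_clks clks;

	consume(&clks);
	return 0;
}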
+371 -209
drivers/gpu/drm/amd/display/dc/core/dc.c
··· 1096 1096 #ifdef CONFIG_DRM_AMD_DC_FP 1097 1097 dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present; 1098 1098 1099 - if (dc->res_pool->funcs->update_bw_bounding_box) { 1100 - DC_FP_START(); 1099 + if (dc->res_pool->funcs->update_bw_bounding_box) 1101 1100 dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params); 1102 - DC_FP_END(); 1103 - } 1104 1101 dc->soc_and_ip_translator = dc_create_soc_and_ip_translator(dc_ctx->dce_version); 1105 1102 if (!dc->soc_and_ip_translator) 1106 1103 goto fail; ··· 1134 1137 struct dc_stream_state *stream, 1135 1138 struct dc_state *context) 1136 1139 { 1140 + (void)dc; 1141 + (void)context; 1137 1142 int i; 1138 1143 1139 1144 for (i = 0; i < stream->num_wb_info; i++) ··· 1147 1148 struct dc_stream_state *stream, 1148 1149 bool lock) 1149 1150 { 1151 + (void)dc; 1152 + (void)context; 1150 1153 int i; 1151 1154 1152 1155 /* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */ ··· 1564 1563 struct dc_link *edp_links[MAX_NUM_EDP]; 1565 1564 struct dc_link *edp_link = NULL; 1566 1565 enum dc_connection_type type; 1567 - int i; 1568 - int edp_num; 1566 + unsigned int i, edp_num; 1569 1567 1570 1568 dc_get_edp_links(dc, edp_links, &edp_num); 1571 1569 if (!edp_num) ··· 1923 1923 return false; 1924 1924 } 1925 1925 1926 - /* block DSC for now, as VBIOS does not currently support DSC timings */ 1927 1926 if (crtc_timing->flags.DSC) { 1928 - DC_LOG_DEBUG("boot timing validation failed due to DSC\n"); 1929 - return false; 1927 + struct display_stream_compressor *dsc = NULL; 1928 + struct dcn_dsc_state dsc_state = {0}; 1929 + 1930 + /* Find DSC associated with this timing generator */ 1931 + if (tg_inst < dc->res_pool->res_cap->num_dsc) { 1932 + dsc = dc->res_pool->dscs[tg_inst]; 1933 + } 1934 + 1935 + if (!dsc || !dsc->funcs->dsc_read_state) { 1936 + DC_LOG_DEBUG("boot timing validation failed due to no DSC resource or read function\n"); 1937 + return false; 1938 + } 1939 + 1940 + /* Read current DSC hardware state */ 1941 + dsc->funcs->dsc_read_state(dsc, &dsc_state); 1942 + 1943 + /* Check if DSC is actually enabled in hardware */ 1944 + if (dsc_state.dsc_clock_en == 0) { 1945 + DC_LOG_DEBUG("boot timing validation failed due to DSC not enabled in hardware\n"); 1946 + return false; 1947 + } 1948 + 1949 + uint32_t num_slices_h = 0; 1950 + uint32_t num_slices_v = 0; 1951 + 1952 + if (dsc_state.dsc_slice_width > 0) { 1953 + num_slices_h = (crtc_timing->h_addressable + dsc_state.dsc_slice_width - 1) / dsc_state.dsc_slice_width; 1954 + } 1955 + 1956 + if (dsc_state.dsc_slice_height > 0) { 1957 + num_slices_v = (crtc_timing->v_addressable + dsc_state.dsc_slice_height - 1) / dsc_state.dsc_slice_height; 1958 + } 1959 + 1960 + if (crtc_timing->dsc_cfg.num_slices_h != num_slices_h) { 1961 + DC_LOG_DEBUG("boot timing validation failed due to num_slices_h mismatch\n"); 1962 + return false; 1963 + } 1964 + 1965 + if (crtc_timing->dsc_cfg.num_slices_v != num_slices_v) { 1966 + DC_LOG_DEBUG("boot timing validation failed due to num_slices_v mismatch\n"); 1967 + return false; 1968 + } 1969 + 1970 + if (crtc_timing->dsc_cfg.bits_per_pixel != dsc_state.dsc_bits_per_pixel) { 1971 + DC_LOG_DEBUG("boot timing validation failed due to bits_per_pixel mismatch\n"); 1972 + return false; 1973 + } 1974 + 1975 + if (crtc_timing->dsc_cfg.block_pred_enable != dsc_state.dsc_block_pred_enable) { 1976 + DC_LOG_DEBUG("boot timing validation failed due to block_pred_enable mismatch\n"); 1977 + return false; 1978 + } 
1979 + 1980 + if (crtc_timing->dsc_cfg.linebuf_depth != dsc_state.dsc_line_buf_depth) { 1981 + DC_LOG_DEBUG("boot timing validation failed due to linebuf_depth mismatch\n"); 1982 + return false; 1983 + } 1984 + 1985 + if (crtc_timing->dsc_cfg.version_minor != dsc_state.dsc_version_minor) { 1986 + DC_LOG_DEBUG("boot timing validation failed due to version_minor mismatch\n"); 1987 + return false; 1988 + } 1989 + 1990 + if (crtc_timing->dsc_cfg.ycbcr422_simple != dsc_state.dsc_simple_422) { 1991 + DC_LOG_DEBUG("boot timing validation failed due to pixel encoding mismatch\n"); 1992 + return false; 1993 + } 1994 + 1995 + // Skip checks for is_frl, is_dp, and rc_buffer_size which are not programmed by vbios 1996 + // or not necessary for seamless boot validation. 1930 1997 } 1931 1998 1932 1999 if (dc_is_dp_signal(link->connector_signal)) { ··· 2762 2695 static struct surface_update_descriptor get_plane_info_update_type(const struct dc_surface_update *u) 2763 2696 { 2764 2697 union surface_update_flags *update_flags = &u->surface->update_flags; 2765 - struct surface_update_descriptor update_type = { UPDATE_TYPE_ADDR_ONLY, LOCK_DESCRIPTOR_NONE }; 2698 + struct surface_update_descriptor update_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE }; 2766 2699 2767 2700 if (!u->plane_info) 2768 2701 return update_type; ··· 2854 2787 const struct dc_surface_update *u) 2855 2788 { 2856 2789 union surface_update_flags *update_flags = &u->surface->update_flags; 2857 - struct surface_update_descriptor update_type = { UPDATE_TYPE_ADDR_ONLY, LOCK_DESCRIPTOR_NONE }; 2790 + struct surface_update_descriptor update_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE }; 2858 2791 2859 2792 if (!u->scaling_info) 2860 2793 return update_type; ··· 2905 2838 return update_type; 2906 2839 } 2907 2840 2908 - static struct surface_update_descriptor check_update_surface( 2841 + static struct surface_update_descriptor det_surface_update( 2909 2842 const struct dc_check_config *check_config, 2910 2843 struct dc_surface_update *u) 2911 2844 { 2912 - struct surface_update_descriptor overall_type = { UPDATE_TYPE_ADDR_ONLY, LOCK_DESCRIPTOR_NONE }; 2845 + struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE }; 2913 2846 union surface_update_flags *update_flags = &u->surface->update_flags; 2914 2847 2915 2848 if (u->surface->force_full_update) { ··· 2929 2862 2930 2863 if (u->flip_addr) { 2931 2864 update_flags->bits.addr_update = 1; 2932 - elevate_update_type(&overall_type, UPDATE_TYPE_ADDR_ONLY, LOCK_DESCRIPTOR_STREAM); 2865 + elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); 2933 2866 2934 2867 if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) { 2935 2868 update_flags->bits.tmz_changed = 1; ··· 2943 2876 2944 2877 if (u->input_csc_color_matrix) { 2945 2878 update_flags->bits.input_csc_change = 1; 2946 - elevate_update_type(&overall_type, 2947 - check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST, 2948 - LOCK_DESCRIPTOR_STREAM); 2949 - } 2950 - 2951 - if (u->cursor_csc_color_matrix) { 2952 - elevate_update_type(&overall_type, 2953 - check_config->enable_legacy_fast_update ? 
UPDATE_TYPE_MED : UPDATE_TYPE_FAST, 2954 - LOCK_DESCRIPTOR_STREAM); 2879 + elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); 2955 2880 } 2956 2881 2957 2882 if (u->coeff_reduction_factor) { 2958 2883 update_flags->bits.coeff_reduction_change = 1; 2959 - elevate_update_type(&overall_type, 2960 - check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST, 2961 - LOCK_DESCRIPTOR_STREAM); 2884 + elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); 2962 2885 } 2963 2886 2964 2887 if (u->gamut_remap_matrix) { 2965 2888 update_flags->bits.gamut_remap_change = 1; 2966 - elevate_update_type(&overall_type, 2967 - check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST, 2968 - LOCK_DESCRIPTOR_STREAM); 2889 + elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); 2969 2890 } 2970 2891 2971 2892 if (u->cm || (u->gamma && dce_use_lut(u->plane_info ? u->plane_info->format : u->surface->format))) { 2972 2893 update_flags->bits.gamma_change = 1; 2973 - elevate_update_type(&overall_type, 2974 - check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST, 2975 - LOCK_DESCRIPTOR_STREAM); 2894 + elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); 2976 2895 } 2977 2896 2978 2897 if (u->cm && (u->cm->flags.bits.lut3d_enable || u->surface->cm.flags.bits.lut3d_enable)) { 2979 2898 update_flags->bits.lut_3d = 1; 2980 - elevate_update_type(&overall_type, 2981 - check_config->enable_legacy_fast_update ? UPDATE_TYPE_MED : UPDATE_TYPE_FAST, 2982 - LOCK_DESCRIPTOR_STREAM); 2899 + elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); 2983 2900 } 2984 2901 2985 2902 if (u->cm && u->cm->flags.bits.lut3d_dma_enable != u->surface->cm.flags.bits.lut3d_dma_enable && ··· 2979 2928 2980 2929 if (u->hdr_mult.value) 2981 2930 if (u->hdr_mult.value != u->surface->hdr_mult.value) { 2931 + // TODO: Should be fast? 2982 2932 update_flags->bits.hdr_mult = 1; 2983 - elevate_update_type(&overall_type, 2984 - check_config->enable_legacy_fast_update ? 
UPDATE_TYPE_MED : UPDATE_TYPE_FAST, 2985 - LOCK_DESCRIPTOR_STREAM); 2933 + elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); 2986 2934 } 2987 2935 2988 2936 if (u->sdr_white_level_nits) ··· 3011 2961 */ 3012 2962 static void force_immediate_gsl_plane_flip(struct dc *dc, struct dc_surface_update *updates, int surface_count) 3013 2963 { 2964 + (void)dc; 3014 2965 bool has_flip_immediate_plane = false; 3015 2966 int i; 3016 2967 ··· 3036 2985 int surface_count, 3037 2986 struct dc_stream_update *stream_update) 3038 2987 { 3039 - struct surface_update_descriptor overall_type = { UPDATE_TYPE_ADDR_ONLY, LOCK_DESCRIPTOR_NONE }; 2988 + struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE }; 3040 2989 3041 2990 /* When countdown finishes, promote this flip to full to trigger deferred final transition */ 3042 2991 if (check_config->deferred_transition_state && !check_config->transition_countdown_to_steady_state) { ··· 3103 3052 if (su_flags->raw) 3104 3053 elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); 3105 3054 3106 - /* Non-global cases */ 3107 - if (stream_update->hdr_static_metadata || 3108 - stream_update->vrr_infopacket || 3109 - stream_update->vsc_infopacket || 3110 - stream_update->vsp_infopacket || 3111 - stream_update->hfvsif_infopacket || 3112 - stream_update->adaptive_sync_infopacket || 3113 - stream_update->vtem_infopacket || 3114 - stream_update->avi_infopacket) { 3115 - elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); 3116 - } 3117 - 3055 + // Non-global cases 3118 3056 if (stream_update->output_csc_transform) { 3119 3057 su_flags->bits.out_csc = 1; 3120 3058 elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); ··· 3113 3073 su_flags->bits.out_tf = 1; 3114 3074 elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); 3115 3075 } 3116 - 3117 - if (stream_update->periodic_interrupt) { 3118 - elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); 3119 - } 3120 - 3121 - if (stream_update->dither_option) { 3122 - elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); 3123 - } 3124 - 3125 - if (stream_update->cursor_position || stream_update->cursor_attributes) { 3126 - elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM); 3127 - } 3128 - 3129 - /* TODO - cleanup post blend CM */ 3130 - if (stream_update->func_shaper || stream_update->lut3d_func) { 3131 - elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM); 3132 - } 3133 - 3134 - if (stream_update->pending_test_pattern) { 3135 - elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); 3136 - } 3137 3076 } 3138 3077 3139 3078 for (int i = 0 ; i < surface_count; i++) { 3140 3079 struct surface_update_descriptor inner_type = 3141 - check_update_surface(check_config, &updates[i]); 3080 + det_surface_update(check_config, &updates[i]); 3142 3081 3143 3082 elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor); 3144 3083 } ··· 3142 3123 updates[i].surface->update_flags.raw = 0; 3143 3124 3144 3125 return check_update_surfaces_for_stream(check_config, updates, surface_count, stream_update); 3145 - } 3146 - 3147 - /* 3148 - * check_update_state_and_surfaces_for_stream() - Determine update type (fast, med, or full) 3149 - * 3150 - * This function performs checks on the DC global state, and is therefore not re-entrant. 
It 3151 - * should not be called from DM. 3152 - * 3153 - * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types 3154 - */ 3155 - static struct surface_update_descriptor check_update_state_and_surfaces_for_stream( 3156 - const struct dc *dc, 3157 - const struct dc_check_config *check_config, 3158 - const struct dc_stream_state *stream, 3159 - const struct dc_surface_update *updates, 3160 - const int surface_count, 3161 - const struct dc_stream_update *stream_update) 3162 - { 3163 - const struct dc_state *context = dc->current_state; 3164 - 3165 - struct surface_update_descriptor overall_type = { UPDATE_TYPE_ADDR_ONLY, LOCK_DESCRIPTOR_NONE}; 3166 - 3167 - if (updates) 3168 - for (int i = 0; i < surface_count; i++) 3169 - if (!is_surface_in_context(context, updates[i].surface)) 3170 - elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); 3171 - 3172 - if (stream) { 3173 - const struct dc_stream_status *stream_status = dc_stream_get_status_const(stream); 3174 - if (stream_status == NULL || stream_status->plane_count != surface_count) 3175 - elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); 3176 - } 3177 - if (dc->idle_optimizations_allowed) 3178 - elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); 3179 - 3180 - if (dc_can_clear_cursor_limit(dc)) 3181 - elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); 3182 - 3183 - return overall_type; 3184 - } 3185 - 3186 - /* 3187 - * dc_check_update_state_and_surfaces_for_stream() - Determine update type (fast, med, or full) 3188 - * 3189 - * This function performs checks on the DC global state, stream and surface update, and is 3190 - * therefore not re-entrant. It should not be called from DM. 
3191 - *
3192 - * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
3193 - */
3194 - static struct surface_update_descriptor dc_check_update_state_and_surfaces_for_stream(
3195 - const struct dc *dc,
3196 - const struct dc_check_config *check_config,
3197 - struct dc_stream_state *stream,
3198 - struct dc_surface_update *updates,
3199 - int surface_count,
3200 - struct dc_stream_update *stream_update)
3201 - {
3202 - /* check updates against the entire DC state (global) first */
3203 - struct surface_update_descriptor overall_update_type = check_update_state_and_surfaces_for_stream(
3204 - dc,
3205 - check_config,
3206 - stream,
3207 - updates,
3208 - surface_count,
3209 - stream_update);
3210 -
3211 - /* check updates for stream and plane */
3212 - struct surface_update_descriptor stream_update_type = dc_check_update_surfaces_for_stream(
3213 - check_config,
3214 - updates,
3215 - surface_count,
3216 - stream_update);
3217 - elevate_update_type(&overall_update_type, stream_update_type.update_type, stream_update_type.lock_descriptor);
3218 -
3219 - return overall_update_type;
3220 3126 }
3221 3127
3222 3128 static struct dc_stream_status *stream_get_status(
···
3290 3346 struct dc_stream_state *stream,
3291 3347 struct dc_stream_update *update)
3292 3348 {
3349 + (void)context;
3293 3350 struct dc_context *dc_ctx = dc->ctx;
3294 3351
3295 3352 if (update == NULL || stream == NULL)
···
3501 3556 }
3502 3557 }
3503 3558
3559 + static bool full_update_required_weak(
3560 + const struct dc *dc,
3561 + const struct dc_surface_update *srf_updates,
3562 + int surface_count,
3563 + const struct dc_stream_update *stream_update,
3564 + const struct dc_stream_state *stream);
3565 +
3504 3566 struct pipe_split_policy_backup {
3505 3567 bool dynamic_odm_policy;
3506 3568 bool subvp_policy;
···
3566 3614 * @surface_count: surface update count
3567 3615 * @stream: Corresponding stream to be updated
3568 3616 * @stream_update: stream update
3569 3617 * @new_update_type: [out] determined update type by the function
3570 3618 * @new_context: [out] new context allocated and validated if update type is
3571 3619 * FULL, reference to current context if update type is less than FULL.
···
3578 3625 struct dc_surface_update *srf_updates, int surface_count,
3579 3626 struct dc_stream_state *stream,
3580 3627 struct dc_stream_update *stream_update,
3581 - struct surface_update_descriptor *update_descriptor,
3628 + enum surface_update_type *new_update_type,
3582 3629 struct dc_state **new_context)
3583 3630 {
3584 3631 struct dc_state *context;
3585 3632 int i, j;
3633 + enum surface_update_type update_type;
3586 3634 const struct dc_stream_status *stream_status;
3587 3635 struct dc_context *dc_ctx = dc->ctx;
···
3597 3643 }
3598 3644
3599 3645 context = dc->current_state;
3600 - *update_descriptor = dc_check_update_state_and_surfaces_for_stream(
3601 - dc,
3602 - &dc->check_config,
3603 - stream,
3604 - srf_updates,
3605 - surface_count,
3606 - stream_update);
3646 + update_type = dc_check_update_surfaces_for_stream(
3647 + &dc->check_config, srf_updates, surface_count, stream_update).update_type;
3648 + if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream))
3649 + update_type = UPDATE_TYPE_FULL;
3607 3650
3608 3651 /* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream.
3609 3652 * E.g.
Desktop and MPO plane are flip_immediate but only the MPO plane received a flip 3610 3653 * Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come. 3611 3654 */ 3612 3655 force_immediate_gsl_plane_flip(dc, srf_updates, surface_count); 3613 - if (update_descriptor->update_type == UPDATE_TYPE_FULL) 3656 + if (update_type == UPDATE_TYPE_FULL) 3614 3657 backup_planes_and_stream_state(&dc->scratch.current_state, stream); 3615 3658 3616 3659 /* update current stream with the new updates */ ··· 3633 3682 } 3634 3683 } 3635 3684 3636 - if (update_descriptor->update_type == UPDATE_TYPE_FULL) { 3685 + if (update_type == UPDATE_TYPE_FULL) { 3637 3686 if (stream_update) { 3638 3687 uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed; 3639 3688 stream_update->stream->update_flags.raw = 0xFFFFFFFF; ··· 3643 3692 srf_updates[i].surface->update_flags.raw = 0xFFFFFFFF; 3644 3693 } 3645 3694 3646 - if (update_descriptor->update_type >= update_surface_trace_level) 3695 + if (update_type >= update_surface_trace_level) 3647 3696 update_surface_trace(dc, srf_updates, surface_count); 3648 3697 3649 3698 for (i = 0; i < surface_count; i++) 3650 3699 copy_surface_update_to_plane(srf_updates[i].surface, &srf_updates[i]); 3651 3700 3652 - if (update_descriptor->update_type >= UPDATE_TYPE_FULL) { 3701 + if (update_type >= UPDATE_TYPE_FULL) { 3653 3702 struct dc_plane_state *new_planes[MAX_SURFACES] = {0}; 3654 3703 3655 3704 for (i = 0; i < surface_count; i++) ··· 3687 3736 for (i = 0; i < surface_count; i++) { 3688 3737 struct dc_plane_state *surface = srf_updates[i].surface; 3689 3738 3690 - if (update_descriptor->update_type != UPDATE_TYPE_MED) 3739 + if (update_type != UPDATE_TYPE_MED) 3691 3740 continue; 3692 3741 if (surface->update_flags.bits.position_change) { 3693 3742 for (j = 0; j < dc->res_pool->pipe_count; j++) { ··· 3701 3750 } 3702 3751 } 3703 3752 3704 - if (update_descriptor->update_type == UPDATE_TYPE_FULL) { 3753 + if (update_type == UPDATE_TYPE_FULL) { 3705 3754 struct pipe_split_policy_backup policy; 3706 3755 bool minimize = false; 3707 3756 ··· 3730 3779 update_seamless_boot_flags(dc, context, surface_count, stream); 3731 3780 3732 3781 *new_context = context; 3733 - if (update_descriptor->update_type == UPDATE_TYPE_FULL) 3782 + *new_update_type = update_type; 3783 + if (update_type == UPDATE_TYPE_FULL) 3734 3784 backup_planes_and_stream_state(&dc->scratch.new_state, stream); 3735 3785 3736 3786 return true; ··· 3811 3859 program_cursor_position(dc, stream); 3812 3860 3813 3861 /* Full fe update*/ 3814 - if (update_type <= UPDATE_TYPE_FAST) 3862 + if (update_type == UPDATE_TYPE_FAST) 3815 3863 continue; 3816 3864 3817 3865 if (stream_update->dsc_config) ··· 3895 3943 3896 3944 static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream) 3897 3945 { 3946 + (void)dc; 3898 3947 if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 3899 3948 || stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) 3900 3949 && stream->ctx->dce_version >= DCN_VERSION_3_1) ··· 4121 4168 struct pipe_ctx *top_pipe_to_program = NULL; 4122 4169 struct dc_stream_status *stream_status = NULL; 4123 4170 bool should_offload_fams2_flip = false; 4124 - bool should_lock_all_pipes = (update_type > UPDATE_TYPE_FAST); 4171 + bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST); 4125 4172 4126 4173 if (should_lock_all_pipes) 4127 4174 determine_pipe_unlock_order(dc, context); ··· 4181 4228 continue; 4182 4229 
4183 4230 pipe_ctx->plane_state->triplebuffer_flips = false; 4184 - if (update_type <= UPDATE_TYPE_FAST && 4231 + if (update_type == UPDATE_TYPE_FAST && 4185 4232 dc->hwss.program_triplebuffer != NULL && 4186 4233 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { 4187 4234 /*triple buffer for VUpdate only*/ ··· 4238 4285 { 4239 4286 int i, j; 4240 4287 struct pipe_ctx *top_pipe_to_program = NULL; 4241 - bool should_lock_all_pipes = (update_type > UPDATE_TYPE_FAST); 4288 + bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST); 4242 4289 bool subvp_prev_use = false; 4243 4290 bool subvp_curr_use = false; 4244 4291 uint8_t current_stream_mask = 0; ··· 4255 4302 if (update_type == UPDATE_TYPE_FULL && dc->optimized_required) 4256 4303 hwss_process_outstanding_hw_updates(dc, dc->current_state); 4257 4304 4258 - if (update_type > UPDATE_TYPE_FAST && dc->res_pool->funcs->prepare_mcache_programming) 4305 + if (update_type != UPDATE_TYPE_FAST && dc->res_pool->funcs->prepare_mcache_programming) 4259 4306 dc->res_pool->funcs->prepare_mcache_programming(dc, context); 4260 4307 4261 4308 for (i = 0; i < dc->res_pool->pipe_count; i++) { ··· 4317 4364 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU; 4318 4365 } 4319 4366 4320 - if ((update_type > UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) 4367 + if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) 4321 4368 if (top_pipe_to_program && 4322 4369 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { 4323 4370 if (should_use_dmub_inbox1_lock(dc, stream->link)) { ··· 4388 4435 } 4389 4436 dc->hwss.post_unlock_program_front_end(dc, context); 4390 4437 4391 - if (update_type > UPDATE_TYPE_FAST) 4438 + if (update_type != UPDATE_TYPE_FAST) 4392 4439 if (dc->hwss.commit_subvp_config) 4393 4440 dc->hwss.commit_subvp_config(dc, context); 4394 4441 ··· 4404 4451 return; 4405 4452 } 4406 4453 4407 - if (update_type > UPDATE_TYPE_FAST) { 4454 + if (update_type != UPDATE_TYPE_FAST) { 4408 4455 for (j = 0; j < dc->res_pool->pipe_count; j++) { 4409 4456 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; 4410 4457 ··· 4432 4479 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state)) 4433 4480 continue; 4434 4481 pipe_ctx->plane_state->triplebuffer_flips = false; 4435 - if (update_type <= UPDATE_TYPE_FAST && 4482 + if (update_type == UPDATE_TYPE_FAST && 4436 4483 dc->hwss.program_triplebuffer != NULL && 4437 4484 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) { 4438 4485 /*triple buffer for VUpdate only*/ ··· 4459 4506 continue; 4460 4507 4461 4508 /* Full fe update*/ 4462 - if (update_type <= UPDATE_TYPE_FAST) 4509 + if (update_type == UPDATE_TYPE_FAST) 4463 4510 continue; 4464 4511 4465 4512 stream_status = ··· 4478 4525 continue; 4479 4526 4480 4527 /* Full fe update*/ 4481 - if (update_type <= UPDATE_TYPE_FAST) 4528 + if (update_type == UPDATE_TYPE_FAST) 4482 4529 continue; 4483 4530 4484 4531 ASSERT(!pipe_ctx->plane_state->triplebuffer_flips); ··· 4489 4536 } 4490 4537 } 4491 4538 4492 - if (dc->hwss.program_front_end_for_ctx && update_type > UPDATE_TYPE_FAST) { 4539 + if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) { 4493 4540 dc->hwss.program_front_end_for_ctx(dc, context); 4494 4541 4495 4542 //Pipe busy until some frame and line # ··· 4517 4564 } 4518 4565 4519 4566 // Update Type FAST, Surface updates 4520 - if (update_type <= UPDATE_TYPE_FAST) { 4567 + if (update_type == UPDATE_TYPE_FAST) { 4521 4568 if 
(dc->hwss.set_flip_control_gsl) 4522 4569 for (i = 0; i < surface_count; i++) { 4523 4570 struct dc_plane_state *plane_state = srf_updates[i].surface; ··· 4554 4601 srf_updates[i].cm->flags.bits.lut3d_enable && 4555 4602 srf_updates[i].cm->flags.bits.lut3d_dma_enable && 4556 4603 dc->hwss.trigger_3dlut_dma_load) 4557 - dc->hwss.trigger_3dlut_dma_load(pipe_ctx); 4604 + dc->hwss.trigger_3dlut_dma_load(dc, pipe_ctx); 4558 4605 4559 4606 /*program triple buffer after lock based on flip type*/ 4560 4607 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) { ··· 4574 4621 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false); 4575 4622 } 4576 4623 4577 - if ((update_type > UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) 4624 + if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) 4578 4625 if (top_pipe_to_program && 4579 4626 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { 4580 4627 top_pipe_to_program->stream_res.tg->funcs->wait_for_state( ··· 4607 4654 /* If enabling subvp or transitioning from subvp->subvp, enable the 4608 4655 * phantom streams before we program front end for the phantom pipes. 4609 4656 */ 4610 - if (update_type > UPDATE_TYPE_FAST) { 4657 + if (update_type != UPDATE_TYPE_FAST) { 4611 4658 if (dc->hwss.enable_phantom_streams) 4612 4659 dc->hwss.enable_phantom_streams(dc, context); 4613 4660 } 4614 4661 } 4615 4662 4616 - if (update_type > UPDATE_TYPE_FAST) 4663 + if (update_type != UPDATE_TYPE_FAST) 4617 4664 dc->hwss.post_unlock_program_front_end(dc, context); 4618 4665 4619 4666 if (subvp_prev_use && !subvp_curr_use) { ··· 4626 4673 dc->hwss.disable_phantom_streams(dc, context); 4627 4674 } 4628 4675 4629 - if (update_type > UPDATE_TYPE_FAST) 4676 + if (update_type != UPDATE_TYPE_FAST) 4630 4677 if (dc->hwss.commit_subvp_config) 4631 4678 dc->hwss.commit_subvp_config(dc, context); 4632 4679 /* Since phantom pipe programming is moved to post_unlock_program_front_end, ··· 4696 4743 int surface_count, 4697 4744 bool *is_plane_addition) 4698 4745 { 4746 + (void)srf_updates; 4699 4747 4700 4748 struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream); 4701 4749 bool force_minimal_pipe_splitting = false; ··· 5099 5145 return true; 5100 5146 } 5101 5147 5148 + void populate_fast_updates(struct dc_fast_update *fast_update, 5149 + struct dc_surface_update *srf_updates, 5150 + int surface_count, 5151 + struct dc_stream_update *stream_update) 5152 + { 5153 + int i = 0; 5154 + 5155 + if (stream_update) { 5156 + fast_update[0].out_transfer_func = stream_update->out_transfer_func; 5157 + fast_update[0].output_csc_transform = stream_update->output_csc_transform; 5158 + } else { 5159 + fast_update[0].out_transfer_func = NULL; 5160 + fast_update[0].output_csc_transform = NULL; 5161 + } 5162 + 5163 + for (i = 0; i < surface_count; i++) { 5164 + fast_update[i].flip_addr = srf_updates[i].flip_addr; 5165 + fast_update[i].gamma = srf_updates[i].gamma; 5166 + fast_update[i].gamut_remap_matrix = srf_updates[i].gamut_remap_matrix; 5167 + fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix; 5168 + fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor; 5169 + fast_update[i].cursor_csc_color_matrix = srf_updates[i].cursor_csc_color_matrix; 5170 + #if defined(CONFIG_DRM_AMD_DC_DCN4_2) 5171 + fast_update[i].cm_hist_control = srf_updates[i].cm_hist_control; 5172 + #endif 5173 + } 5174 + } 5175 + 5176 + static bool fast_updates_exist(const 
struct dc_fast_update *fast_update, int surface_count) 5177 + { 5178 + int i; 5179 + 5180 + if (fast_update[0].out_transfer_func || 5181 + fast_update[0].output_csc_transform) 5182 + return true; 5183 + 5184 + for (i = 0; i < surface_count; i++) { 5185 + if (fast_update[i].flip_addr || 5186 + fast_update[i].gamma || 5187 + fast_update[i].gamut_remap_matrix || 5188 + fast_update[i].input_csc_color_matrix || 5189 + fast_update[i].cursor_csc_color_matrix || 5190 + #if defined(CONFIG_DRM_AMD_DC_DCN4_2) 5191 + fast_update[i].cm_hist_control || 5192 + #endif 5193 + fast_update[i].coeff_reduction_factor) 5194 + return true; 5195 + } 5196 + 5197 + return false; 5198 + } 5199 + 5200 + bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_count) 5201 + { 5202 + int i; 5203 + 5204 + if (fast_update[0].out_transfer_func || 5205 + fast_update[0].output_csc_transform) 5206 + return true; 5207 + 5208 + for (i = 0; i < surface_count; i++) { 5209 + if (fast_update[i].input_csc_color_matrix || 5210 + fast_update[i].gamma || 5211 + fast_update[i].gamut_remap_matrix || 5212 + fast_update[i].coeff_reduction_factor || 5213 + #if defined(CONFIG_DRM_AMD_DC_DCN4_2) 5214 + fast_update[i].cm_hist_control || 5215 + #endif 5216 + fast_update[i].cursor_csc_color_matrix) 5217 + return true; 5218 + } 5219 + 5220 + return false; 5221 + } 5222 + 5223 + static bool full_update_required_weak( 5224 + const struct dc *dc, 5225 + const struct dc_surface_update *srf_updates, 5226 + int surface_count, 5227 + const struct dc_stream_update *stream_update, 5228 + const struct dc_stream_state *stream) 5229 + { 5230 + (void)stream_update; 5231 + const struct dc_state *context = dc->current_state; 5232 + if (srf_updates) 5233 + for (int i = 0; i < surface_count; i++) 5234 + if (!is_surface_in_context(context, srf_updates[i].surface)) 5235 + return true; 5236 + 5237 + if (stream) { 5238 + const struct dc_stream_status *stream_status = dc_stream_get_status_const(stream); 5239 + if (stream_status == NULL || stream_status->plane_count != surface_count) 5240 + return true; 5241 + } 5242 + if (dc->idle_optimizations_allowed) 5243 + return true; 5244 + 5245 + if (dc_can_clear_cursor_limit(dc)) 5246 + return true; 5247 + 5248 + return false; 5249 + } 5250 + 5251 + static bool full_update_required( 5252 + const struct dc *dc, 5253 + const struct dc_surface_update *srf_updates, 5254 + int surface_count, 5255 + const struct dc_stream_update *stream_update, 5256 + const struct dc_stream_state *stream) 5257 + { 5258 + const union dc_plane_cm_flags blend_only_flags = { 5259 + .bits = { 5260 + .blend_enable = 1, 5261 + } 5262 + }; 5263 + 5264 + if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream)) 5265 + return true; 5266 + 5267 + for (int i = 0; i < surface_count; i++) { 5268 + if (srf_updates && 5269 + (srf_updates[i].plane_info || 5270 + srf_updates[i].scaling_info || 5271 + (srf_updates[i].hdr_mult.value && 5272 + srf_updates[i].hdr_mult.value != srf_updates->surface->hdr_mult.value) || 5273 + (srf_updates[i].sdr_white_level_nits && 5274 + srf_updates[i].sdr_white_level_nits != srf_updates->surface->sdr_white_level_nits) || 5275 + srf_updates[i].in_transfer_func || 5276 + srf_updates[i].surface->force_full_update || 5277 + (srf_updates[i].flip_addr && 5278 + srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) || 5279 + (srf_updates[i].cm && 5280 + ((srf_updates[i].cm->flags.all != blend_only_flags.all && srf_updates[i].cm->flags.all != 0) || 5281 + 
(srf_updates[i].surface->cm.flags.all != blend_only_flags.all && srf_updates[i].surface->cm.flags.all != 0))))) 5282 + return true; 5283 + } 5284 + 5285 + if (stream_update && 5286 + (((stream_update->src.height != 0 && stream_update->src.width != 0) || 5287 + (stream_update->dst.height != 0 && stream_update->dst.width != 0) || 5288 + stream_update->integer_scaling_update) || 5289 + stream_update->hdr_static_metadata || 5290 + stream_update->abm_level || 5291 + stream_update->periodic_interrupt || 5292 + stream_update->vrr_infopacket || 5293 + stream_update->vsc_infopacket || 5294 + stream_update->vsp_infopacket || 5295 + stream_update->hfvsif_infopacket || 5296 + stream_update->vtem_infopacket || 5297 + stream_update->adaptive_sync_infopacket || 5298 + stream_update->avi_infopacket || 5299 + stream_update->dpms_off || 5300 + stream_update->allow_freesync || 5301 + stream_update->vrr_active_variable || 5302 + stream_update->vrr_active_fixed || 5303 + stream_update->gamut_remap || 5304 + stream_update->output_color_space || 5305 + stream_update->dither_option || 5306 + stream_update->wb_update || 5307 + stream_update->dsc_config || 5308 + stream_update->mst_bw_update || 5309 + stream_update->func_shaper || 5310 + stream_update->lut3d_func || 5311 + stream_update->pending_test_pattern || 5312 + stream_update->crtc_timing_adjust || 5313 + stream_update->scaler_sharpener_update || 5314 + stream_update->hw_cursor_req)) 5315 + return true; 5316 + 5317 + return false; 5318 + } 5319 + 5320 + static bool fast_update_only( 5321 + const struct dc *dc, 5322 + const struct dc_fast_update *fast_update, 5323 + const struct dc_surface_update *srf_updates, 5324 + int surface_count, 5325 + const struct dc_stream_update *stream_update, 5326 + const struct dc_stream_state *stream) 5327 + { 5328 + return fast_updates_exist(fast_update, surface_count) 5329 + && !full_update_required(dc, srf_updates, surface_count, stream_update, stream); 5330 + } 5331 + 5102 5332 static bool update_planes_and_stream_v2(struct dc *dc, 5103 5333 struct dc_surface_update *srf_updates, int surface_count, 5104 5334 struct dc_stream_state *stream, 5105 5335 struct dc_stream_update *stream_update) 5106 5336 { 5107 5337 struct dc_state *context; 5338 + enum surface_update_type update_type; 5339 + struct dc_fast_update fast_update[MAX_SURFACES] = {0}; 5108 5340 5109 5341 /* In cases where MPO and split or ODM are used transitions can 5110 5342 * cause underflow. 
Apply stream configuration with minimal pipe ··· 5298 5158 */ 5299 5159 bool force_minimal_pipe_splitting = 0; 5300 5160 bool is_plane_addition = 0; 5161 + bool is_fast_update_only; 5301 5162 5302 - struct surface_update_descriptor update_descriptor = {0}; 5303 - 5163 + populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); 5164 + is_fast_update_only = fast_update_only(dc, fast_update, srf_updates, 5165 + surface_count, stream_update, stream); 5304 5166 force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes( 5305 5167 dc, 5306 5168 stream, ··· 5321 5179 surface_count, 5322 5180 stream, 5323 5181 stream_update, 5324 - &update_descriptor, 5182 + &update_type, 5325 5183 &context)) 5326 5184 return false; 5327 5185 ··· 5331 5189 dc_state_release(context); 5332 5190 return false; 5333 5191 } 5334 - elevate_update_type(&update_descriptor, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL); 5192 + update_type = UPDATE_TYPE_FULL; 5335 5193 } 5336 5194 5337 5195 if (dc->hwss.is_pipe_topology_transition_seamless && ··· 5340 5198 commit_minimal_transition_state_in_dc_update(dc, context, stream, 5341 5199 srf_updates, surface_count); 5342 5200 5343 - if (update_descriptor.update_type <= UPDATE_TYPE_FAST) { 5201 + if (is_fast_update_only && !dc->check_config.enable_legacy_fast_update) { 5344 5202 commit_planes_for_stream_fast(dc, 5345 5203 srf_updates, 5346 5204 surface_count, 5347 5205 stream, 5348 5206 stream_update, 5349 - update_descriptor.update_type, 5207 + update_type, 5350 5208 context); 5351 5209 } else { 5352 5210 if (!stream_update && ··· 5362 5220 surface_count, 5363 5221 stream, 5364 5222 stream_update, 5365 - update_descriptor.update_type, 5223 + update_type, 5366 5224 context); 5367 5225 } 5368 5226 if (dc->current_state != context) ··· 5376 5234 struct dc_stream_update *stream_update, 5377 5235 enum surface_update_type update_type) 5378 5236 { 5237 + struct dc_fast_update fast_update[MAX_SURFACES] = {0}; 5238 + 5379 5239 ASSERT(update_type < UPDATE_TYPE_FULL); 5380 - if (update_type <= UPDATE_TYPE_FAST) 5240 + populate_fast_updates(fast_update, srf_updates, surface_count, 5241 + stream_update); 5242 + if (fast_update_only(dc, fast_update, srf_updates, surface_count, 5243 + stream_update, stream) && 5244 + !dc->check_config.enable_legacy_fast_update) 5381 5245 commit_planes_for_stream_fast(dc, 5382 5246 srf_updates, 5383 5247 surface_count, ··· 5474 5326 struct dc_stream_update *stream_update) 5475 5327 { 5476 5328 struct dc_state *new_context; 5477 - struct surface_update_descriptor update_descriptor = {0}; 5329 + enum surface_update_type update_type; 5478 5330 5479 5331 /* 5480 5332 * When this function returns true and new_context is not equal to ··· 5486 5338 * replaced by a newer context. Refer to the use of 5487 5339 * swap_and_free_current_context below. 
5488 5340 */ 5489 - if (!update_planes_and_stream_state(dc, 5490 - srf_updates, 5491 - surface_count, 5492 - stream, 5493 - stream_update, 5494 - &update_descriptor, 5341 + if (!update_planes_and_stream_state(dc, srf_updates, surface_count, 5342 + stream, stream_update, &update_type, 5495 5343 &new_context)) 5496 5344 return false; 5497 5345 5498 5346 if (new_context == dc->current_state) { 5499 5347 commit_planes_and_stream_update_on_current_context(dc, 5500 5348 srf_updates, surface_count, stream, 5501 - stream_update, update_descriptor.update_type); 5349 + stream_update, update_type); 5502 5350 5503 5351 if (dc->check_config.transition_countdown_to_steady_state) 5504 5352 dc->check_config.transition_countdown_to_steady_state--; 5505 5353 } else { 5506 5354 commit_planes_and_stream_update_with_new_context(dc, 5507 5355 srf_updates, surface_count, stream, 5508 - stream_update, update_descriptor.update_type, new_context); 5356 + stream_update, update_type, new_context); 5509 5357 } 5510 5358 5511 5359 return true; ··· 5551 5407 struct dc_stream_update *stream_update, 5552 5408 struct dc_state *state) 5553 5409 { 5410 + (void)state; 5554 5411 bool ret = false; 5555 5412 5556 5413 dc_exit_ips_for_hw_access(dc); ··· 5861 5716 5862 5717 static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz) 5863 5718 { 5719 + (void)apply; 5864 5720 struct dc_state *context = dc->current_state; 5865 5721 struct hubp *hubp; 5866 5722 struct pipe_ctx *pipe; ··· 6406 6260 */ 6407 6261 void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable) 6408 6262 { 6409 - int i; 6410 - int edp_num; 6263 + unsigned int i, edp_num; 6411 6264 struct pipe_ctx *pipe = NULL; 6412 6265 struct dc_link *link = stream->sink->link; 6413 6266 struct dc_link *edp_links[MAX_NUM_EDP]; ··· 6460 6315 struct dc_stream_state *stream, 6461 6316 struct abm_save_restore *pData) 6462 6317 { 6463 - int i; 6464 - int edp_num; 6318 + unsigned int i, edp_num; 6465 6319 struct pipe_ctx *pipe = NULL; 6466 6320 struct dc_link *link = stream->sink->link; 6467 6321 struct dc_link *edp_links[MAX_NUM_EDP]; ··· 6536 6392 void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link, 6537 6393 bool powerOn) 6538 6394 { 6395 + (void)dc; 6539 6396 if (edp_link->connector_signal != SIGNAL_TYPE_EDP) 6540 6397 return; 6541 6398 ··· 6663 6518 void dc_get_power_feature_status(struct dc *dc, int primary_otg_inst, 6664 6519 struct power_features *out_data) 6665 6520 { 6521 + (void)primary_otg_inst; 6666 6522 out_data->uclk_p_state = dc->current_state->clk_mgr->clks.p_state_change_support; 6667 6523 out_data->fams = dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching; 6668 6524 } ··· 7277 7131 struct dc_stream_update *stream_update; 7278 7132 bool update_v3; 7279 7133 bool do_clear_update_flags; 7280 - struct surface_update_descriptor update_descriptor; 7134 + enum surface_update_type update_type; 7281 7135 struct dc_state *new_context; 7282 7136 enum update_v3_flow flow; 7283 7137 struct dc_state *backup_context; ··· 7360 7214 ASSERT(scratch->flow == UPDATE_V3_FLOW_INVALID); 7361 7215 dc_exit_ips_for_hw_access(scratch->dc); 7362 7216 7217 + /* HWSS path determination needs to be done prior to updating the surface and stream states. 
*/ 7218 + struct dc_fast_update fast_update[MAX_SURFACES] = { 0 }; 7219 + 7220 + populate_fast_updates(fast_update, 7221 + scratch->surface_updates, 7222 + scratch->surface_count, 7223 + scratch->stream_update); 7224 + 7225 + const bool is_hwss_fast_path_only = 7226 + fast_update_only(scratch->dc, 7227 + fast_update, 7228 + scratch->surface_updates, 7229 + scratch->surface_count, 7230 + scratch->stream_update, 7231 + scratch->stream) && 7232 + !scratch->dc->check_config.enable_legacy_fast_update; 7233 + 7363 7234 if (!update_planes_and_stream_state( 7364 7235 scratch->dc, 7365 7236 scratch->surface_updates, 7366 7237 scratch->surface_count, 7367 7238 scratch->stream, 7368 7239 scratch->stream_update, 7369 - &scratch->update_descriptor, 7240 + &scratch->update_type, 7370 7241 &scratch->new_context 7371 7242 )) { 7372 7243 return false; 7373 7244 } 7374 7245 7375 7246 if (scratch->new_context == scratch->dc->current_state) { 7376 - ASSERT(scratch->update_descriptor.update_type < UPDATE_TYPE_FULL); 7247 + ASSERT(scratch->update_type < UPDATE_TYPE_FULL); 7377 7248 7378 - scratch->flow = scratch->update_descriptor.update_type <= UPDATE_TYPE_FAST 7249 + scratch->flow = is_hwss_fast_path_only 7379 7250 ? UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FAST 7380 7251 : UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FULL; 7381 7252 return true; 7382 7253 } 7383 7254 7384 - ASSERT(scratch->update_descriptor.update_type >= UPDATE_TYPE_FULL); 7255 + ASSERT(scratch->update_type >= UPDATE_TYPE_FULL); 7385 7256 7386 7257 const bool seamless = scratch->dc->hwss.is_pipe_topology_transition_seamless( 7387 7258 scratch->dc, ··· 7471 7308 intermediate_update ? scratch->intermediate_count : scratch->surface_count, 7472 7309 scratch->stream, 7473 7310 use_stream_update ? scratch->stream_update : NULL, 7474 - intermediate_context ? UPDATE_TYPE_FULL : scratch->update_descriptor.update_type, 7311 + intermediate_context ? UPDATE_TYPE_FULL : scratch->update_type, 7475 7312 // `dc->current_state` only used in `NO_NEW_CONTEXT`, where it is equal to `new_context` 7476 7313 intermediate_context ? scratch->intermediate_context : scratch->new_context 7477 7314 ); ··· 7489 7326 scratch->surface_count, 7490 7327 scratch->stream, 7491 7328 scratch->stream_update, 7492 - scratch->update_descriptor.update_type, 7329 + scratch->update_type, 7493 7330 scratch->new_context 7494 7331 ); 7495 7332 break;
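The eDP DSC seamless-boot validation added to dc_validate_boot_timing() above derives the expected slice counts by ceiling-dividing the addressable width and height by the slice dimensions read back from the DSC block, then comparing against the requested timing. A standalone check of that arithmetic (example values, not taken from any particular panel):

#include <assert.h>
#include <stdint.h>

/* Same ceiling division as the num_slices_h/num_slices_v computation above. */
static uint32_t expected_slices(uint32_t addressable, uint32_t slice_size)
{
	if (slice_size == 0)
		return 0; /* mirrors the guarded reads in the validation code */
	return (addressable + slice_size - 1) / slice_size;
}

int main(void)
{
	assert(expected_slices(3840, 1920) == 2); /* exact fit */
	assert(expected_slices(3840, 1600) == 3); /* 2.4 rounds up to 3 */
	assert(expected_slices(1080, 1080) == 1);
	return 0;
}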
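The reworked commit path in dc.c drops the ADDR_ONLY classification in favor of an explicit fast-path test: populate_fast_updates() gathers the fast-capable fields, and the fast path is taken only when fast_updates_exist() is true and full_update_required() is false. A condensed standalone model of that decision, with a stand-in struct in place of dc_surface_update:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct model_update {
	const void *flip_addr;    /* fast-capable member */
	const void *plane_info;   /* forces a full update */
	const void *scaling_info; /* forces a full update */
};

static bool model_fast_update_only(const struct model_update *u)
{
	bool fast_exists = u->flip_addr != NULL;
	bool full_required = u->plane_info != NULL || u->scaling_info != NULL;

	return fast_exists && !full_required;
}

int main(void)
{
	int addr, info;
	struct model_update flip = { &addr, NULL, NULL };   /* fast */
	struct model_update resize = { &addr, NULL, &info }; /* full */

	printf("%d %d\n", model_fast_update_only(&flip), model_fast_update_only(&resize));
	return 0;
}

In the diff the result is additionally gated on !dc->check_config.enable_legacy_fast_update before commit_planes_for_stream_fast() is taken.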
+1
drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
··· 249 249 enum dc_color_space colorspace, 250 250 struct tg_color *black_color) 251 251 { 252 + (void)dc; 252 253 switch (colorspace) { 253 254 case COLOR_SPACE_YCBCR601: 254 255 case COLOR_SPACE_YCBCR709:
+8 -1
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
··· 1748 1748 const struct dc *dc, 1749 1749 struct dc_state *context) 1750 1750 { 1751 + (void)dc; 1751 1752 int i; 1752 1753 1753 1754 for (i = 0; i < MAX_PIPES; i++) { ··· 1826 1825 struct resource_context *new_res_ctx, 1827 1826 const struct pipe_ctx *cur_otg_master) 1828 1827 { 1828 + (void)cur_res_ctx; 1829 1829 const struct pipe_ctx *cur_sec_opp_head = cur_otg_master->next_odm_pipe; 1830 1830 struct pipe_ctx *new_pipe; 1831 1831 int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND; ··· 1848 1846 struct resource_context *new_res_ctx, 1849 1847 const struct pipe_ctx *cur_opp_head) 1850 1848 { 1849 + (void)cur_res_ctx; 1851 1850 const struct pipe_ctx *cur_sec_dpp = cur_opp_head->bottom_pipe; 1852 1851 struct pipe_ctx *new_pipe; 1853 1852 int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND; ··· 2944 2941 const struct resource_pool *pool, 2945 2942 struct dc_stream_state *stream) 2946 2943 { 2944 + (void)pool; 2947 2945 struct dc *dc = stream->ctx->dc; 2948 2946 2949 2947 return dc->res_pool->funcs->add_stream_to_ctx(dc, new_ctx, stream); ··· 3027 3023 struct dc_plane_state *plane_state, 3028 3024 struct dc_state *context) 3029 3025 { 3026 + (void)context; 3030 3027 struct pipe_ctx *opp_head_pipe = otg_master_pipe; 3031 3028 3032 3029 while (opp_head_pipe) { ··· 3620 3615 const struct resource_pool *pool, 3621 3616 struct dc_stream_state *stream) 3622 3617 { 3618 + (void)stream; 3623 3619 int i; 3624 3620 3625 3621 for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) { ··· 3640 3634 enum engine_id id, 3641 3635 enum dce_version dc_version) 3642 3636 { 3637 + (void)dc_version; 3643 3638 int i, available_audio_count; 3644 3639 3645 3640 if (id == ENGINE_ID_UNKNOWN) ··· 5247 5240 return 64; 5248 5241 default: 5249 5242 ASSERT_CRITICAL(false); 5250 - return -1; 5243 + return UINT_MAX; 5251 5244 } 5252 5245 } 5253 5246 static unsigned int get_max_audio_sample_rate(struct audio_mode *modes)
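On the `return -1;` to `return UINT_MAX;` change in dc_resource.c above: the function is declared to return unsigned int, so returning -1 already produced UINT_MAX through the implicit conversion; spelling it out makes the sentinel explicit. A standalone demonstration:

#include <assert.h>
#include <limits.h>

static unsigned int old_style(void) { return -1; }       /* converts to UINT_MAX */
static unsigned int new_style(void) { return UINT_MAX; } /* says what it means */

int main(void)
{
	assert(old_style() == new_style()); /* identical values either way */
	return 0;
}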
+57 -20
drivers/gpu/drm/amd/display/dc/core/dc_state.c
··· 205 205 state->power_source = params ? params->power_source : DC_POWER_SOURCE_AC; 206 206 207 207 #ifdef CONFIG_DRM_AMD_DC_FP 208 + bool status; 209 + 208 210 if (dc->debug.using_dml2) { 209 - if (!dml2_create(dc, &dc->dml2_options, &state->bw_ctx.dml2)) { 211 + DC_FP_START(); 212 + status = dml2_create(dc, &dc->dml2_options, &state->bw_ctx.dml2); 213 + DC_FP_END(); 214 + 215 + if (!status) { 210 216 dc_state_release(state); 211 217 return NULL; 212 218 } 213 219 214 - if (dc->caps.dcmode_power_limits_present && !dml2_create(dc, &dc->dml2_dc_power_options, &state->bw_ctx.dml2_dc_power_source)) { 215 - dc_state_release(state); 216 - return NULL; 220 + if (dc->caps.dcmode_power_limits_present) { 221 + bool status; 222 + 223 + DC_FP_START(); 224 + status = dml2_create(dc, &dc->dml2_dc_power_options, &state->bw_ctx.dml2_dc_power_source); 225 + DC_FP_END(); 226 + 227 + if (!status) { 228 + dc_state_release(state); 229 + return NULL; 230 + } 217 231 } 232 + 218 233 } 219 - #endif 220 - 234 + #endif // CONFIG_DRM_AMD_DC_FP 221 235 kref_init(&state->refcount); 222 236 223 237 return state; ··· 249 235 250 236 #ifdef CONFIG_DRM_AMD_DC_FP 251 237 dst_state->bw_ctx.dml2 = dst_dml2; 252 - if (src_state->bw_ctx.dml2) 238 + if (src_state->bw_ctx.dml2) { 239 + DC_FP_START(); 253 240 dml2_copy(dst_state->bw_ctx.dml2, src_state->bw_ctx.dml2); 241 + DC_FP_END(); 242 + } 254 243 255 244 dst_state->bw_ctx.dml2_dc_power_source = dst_dml2_dc_power_source; 256 - if (src_state->bw_ctx.dml2_dc_power_source) 257 - dml2_copy(dst_state->bw_ctx.dml2_dc_power_source, src_state->bw_ctx.dml2_dc_power_source); 258 - #endif 259 245 246 + if (src_state->bw_ctx.dml2_dc_power_source) { 247 + DC_FP_START(); 248 + dml2_copy(dst_state->bw_ctx.dml2_dc_power_source, src_state->bw_ctx.dml2_dc_power_source); 249 + DC_FP_END(); 250 + } 251 + #endif // CONFIG_DRM_AMD_DC_FP 260 252 /* context refcount should not be overridden */ 261 253 dst_state->refcount = refcount; 262 254 } ··· 278 258 dc_state_copy_internal(new_state, src_state); 279 259 280 260 #ifdef CONFIG_DRM_AMD_DC_FP 261 + bool status; 262 + 281 263 new_state->bw_ctx.dml2 = NULL; 282 264 new_state->bw_ctx.dml2_dc_power_source = NULL; 283 265 284 - if (src_state->bw_ctx.dml2 && 285 - !dml2_create_copy(&new_state->bw_ctx.dml2, src_state->bw_ctx.dml2)) { 286 - dc_state_release(new_state); 287 - return NULL; 266 + if (src_state->bw_ctx.dml2) { 267 + DC_FP_START(); 268 + status = dml2_create_copy(&new_state->bw_ctx.dml2, src_state->bw_ctx.dml2); 269 + DC_FP_END(); 270 + 271 + if (!status) { 272 + dc_state_release(new_state); 273 + return NULL; 274 + } 288 275 } 289 276 290 - if (src_state->bw_ctx.dml2_dc_power_source && 291 - !dml2_create_copy(&new_state->bw_ctx.dml2_dc_power_source, src_state->bw_ctx.dml2_dc_power_source)) { 292 - dc_state_release(new_state); 293 - return NULL; 294 - } 295 - #endif 296 277 278 + if (src_state->bw_ctx.dml2_dc_power_source) { 279 + DC_FP_START(); 280 + status = dml2_create_copy(&new_state->bw_ctx.dml2_dc_power_source, 281 + src_state->bw_ctx.dml2_dc_power_source); 282 + DC_FP_END(); 283 + 284 + if (!status) { 285 + dc_state_release(new_state); 286 + return NULL; 287 + } 288 + } 289 + #endif // CONFIG_DRM_AMD_DC_FP 297 290 kref_init(&new_state->refcount); 298 291 299 292 return new_state; ··· 384 351 dc_state_destruct(state); 385 352 386 353 #ifdef CONFIG_DRM_AMD_DC_FP 354 + DC_FP_START(); 387 355 dml2_destroy(state->bw_ctx.dml2); 388 356 state->bw_ctx.dml2 = 0; 389 357 390 358 dml2_destroy(state->bw_ctx.dml2_dc_power_source); 391 359 
state->bw_ctx.dml2_dc_power_source = 0; 360 + DC_FP_END(); 392 361 #endif 393 362 394 363 kvfree(state); ··· 409 374 struct dc_state *state, 410 375 struct dc_stream_state *stream) 411 376 { 377 + (void)dc; 412 378 enum dc_status res; 413 379 414 380 DC_LOGGER_INIT(dc->ctx->logger); ··· 785 749 struct dc_state *state, 786 750 struct dc_plane_state *main_plane) 787 751 { 752 + (void)main_plane; 788 753 struct dc_plane_state *phantom_plane = dc_create_plane_state(dc); 789 754 790 755 DC_LOGGER_INIT(dc->ctx->logger);
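The dc_state.c changes above all follow one pattern: every dml2_create()/dml2_copy()/dml2_create_copy()/dml2_destroy() call is bracketed by DC_FP_START()/DC_FP_END(), with the status captured inside the window and the error handling kept outside it. A standalone sketch of that shape; the macros and do_fp_work() here are stand-ins, the real guards come from dc_fpu.h and save/restore FPU state:

#include <stdbool.h>

static int fp_depth; /* stand-in: tracks the open FP window */
#define DC_FP_START() (fp_depth++)
#define DC_FP_END()   (fp_depth--)

static bool do_fp_work(void) { return true; } /* stands in for dml2_create() */

int main(void)
{
	bool status;

	DC_FP_START();
	status = do_fp_work(); /* FP-touching call stays inside the window */
	DC_FP_END();

	/* error handling happens after the window is closed */
	return (status && fp_depth == 0) ? 0 : 1;
}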
+11 -2
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
··· 42 42 #define MAX(x, y) ((x > y) ? x : y) 43 43 #endif 44 44 45 + #include "dc_fpu.h" 46 + 47 + #if !defined(DC_RUN_WITH_PREEMPTION_ENABLED) 48 + #define DC_RUN_WITH_PREEMPTION_ENABLED(code) code 49 + #endif // !DC_RUN_WITH_PREEMPTION_ENABLED 50 + 51 + 45 52 /******************************************************************************* 46 53 * Private functions 47 54 ******************************************************************************/ ··· 177 170 if (sink == NULL) 178 171 goto fail; 179 172 180 - stream = kzalloc_obj(struct dc_stream_state, GFP_ATOMIC); 173 + DC_RUN_WITH_PREEMPTION_ENABLED(stream = kzalloc_obj(struct dc_stream_state, GFP_ATOMIC)); 181 174 182 175 if (stream == NULL) 183 176 goto fail; 184 177 185 - stream->update_scratch = kzalloc((int32_t) dc_update_scratch_space_size(), GFP_ATOMIC); 178 + DC_RUN_WITH_PREEMPTION_ENABLED(stream->update_scratch = 179 + kzalloc((int32_t) dc_update_scratch_space_size(), 180 + GFP_ATOMIC)); 186 181 187 182 if (stream->update_scratch == NULL) 188 183 goto fail;
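The `#if !defined(DC_RUN_WITH_PREEMPTION_ENABLED)` block above is a weak-default macro: a platform header may supply a version that re-enables preemption around the wrapped statement, and when none is provided the wrapper compiles away to the bare statement. A standalone illustration with a hypothetical macro name:

#include <stdlib.h>

#if !defined(RUN_PREEMPTIBLE)
#define RUN_PREEMPTIBLE(code) code /* no-op default: just run the code */
#endif

int main(void)
{
	void *buf;

	RUN_PREEMPTIBLE(buf = malloc(64)); /* expands to the plain assignment */
	free(buf);
	return 0;
}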
+1
drivers/gpu/drm/amd/display/dc/core/dc_surface.c
··· 57 57 58 58 void dc_plane_destruct(struct dc_plane_state *plane_state) 59 59 { 60 + (void)plane_state; 60 61 // no more pointers to free within dc_plane_state 61 62 } 62 63
+24 -6
drivers/gpu/drm/amd/display/dc/dc.h
··· 63 63 struct dcn_optc_reg_state; 64 64 struct dcn_dccg_reg_state; 65 65 66 - #define DC_VER "3.2.375" 66 + #define DC_VER "3.2.376" 67 67 68 68 /** 69 69 * MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC ··· 467 467 */ 468 468 469 469 enum surface_update_type { 470 - UPDATE_TYPE_ADDR_ONLY, /* only surface address is being updated, no other programming needed */ 471 470 UPDATE_TYPE_FAST, /* super fast, safe to execute in isr */ 472 471 UPDATE_TYPE_MED, /* ISR safe, most of programming needed, no bw/clk change*/ 473 472 UPDATE_TYPE_FULL, /* may need to shuffle resources */ ··· 520 521 union allow_lttpr_non_transparent_mode allow_lttpr_non_transparent_mode; 521 522 bool multi_mon_pp_mclk_switch; 522 523 bool disable_dmcu; 523 - bool enable_4to1MPC; 524 + bool allow_4to1MPC; 524 525 bool enable_windowed_mpo_odm; 525 526 bool forceHBR2CP2520; // Used for switching between test patterns TPS4 and CP2520 526 527 uint32_t allow_edp_hotplug_detection; ··· 562 563 bool frame_update_cmd_version2; 563 564 struct spl_sharpness_range dcn_sharpness_range; 564 565 struct spl_sharpness_range dcn_override_sharpness_range; 565 - bool no_native422_support; 566 566 }; 567 567 568 568 enum visual_confirm { ··· 986 988 * causing an issue or not. 987 989 */ 988 990 struct dc_debug_options { 991 + bool native422_support; 989 992 bool disable_dsc; 990 993 enum visual_confirm visual_confirm; 991 994 int visual_confirm_rect_height; ··· 1880 1881 struct scaling_taps scaling_quality; 1881 1882 }; 1882 1883 1884 + struct dc_fast_update { 1885 + const struct dc_flip_addrs *flip_addr; 1886 + const struct dc_gamma *gamma; 1887 + const struct colorspace_transform *gamut_remap_matrix; 1888 + const struct dc_csc_transform *input_csc_color_matrix; 1889 + const struct fixed31_32 *coeff_reduction_factor; 1890 + struct dc_transfer_func *out_transfer_func; 1891 + struct dc_csc_transform *output_csc_transform; 1892 + const struct dc_csc_transform *cursor_csc_color_matrix; 1893 + #if defined(CONFIG_DRM_AMD_DC_DCN4_2) 1894 + struct cm_hist_control *cm_hist_control; 1895 + #endif 1896 + }; 1897 + 1883 1898 struct dc_surface_update { 1884 1899 struct dc_plane_state *surface; 1885 1900 ··· 2032 2019 void get_audio_check(struct audio_info *aud_modes, 2033 2020 struct audio_check *aud_chk); 2034 2021 2035 - /* 2022 + bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_count); 2023 + void populate_fast_updates(struct dc_fast_update *fast_update, 2024 + struct dc_surface_update *srf_updates, 2025 + int surface_count, 2026 + struct dc_stream_update *stream_update); 2027 + /* 2036 2028 * Set up streams and links associated to drive sinks 2037 2029 * The streams parameter is an absolute set of all active streams. 2038 2030 * ··· 2080 2062 /* Return an array of link pointers to edp links. */ 2081 2063 void dc_get_edp_links(const struct dc *dc, 2082 2064 struct dc_link **edp_links, 2083 - int *edp_num); 2065 + unsigned int *edp_num); 2084 2066 2085 2067 void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link, 2086 2068 bool powerOn);
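The new struct dc_fast_update in dc.h is a sparse descriptor: every member is a pointer and NULL means "this property is not part of the update", which is what lets fast_updates_exist() reduce to a series of non-NULL tests. A standalone model of that convention with stand-in members:

#include <stdbool.h>
#include <stddef.h>

struct model_fast_update {
	const int *flip_addr; /* stand-ins for the real pointer members */
	const int *gamma;
};

/* Non-NULL member == property touched by this update. */
static bool model_update_exists(const struct model_fast_update *u)
{
	return u->flip_addr != NULL || u->gamma != NULL;
}

int main(void)
{
	int addr = 0;
	struct model_fast_update flip_only = { &addr, NULL };
	struct model_fast_update empty = { NULL, NULL };

	return (model_update_exists(&flip_only) && !model_update_exists(&empty)) ? 0 : 1;
}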
+9 -2
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
··· 958 958 { 959 959 uint32_t i; 960 960 961 - if (!dc_dmub_srv || !dc_dmub_srv->dmub) { 961 + if (!dc_dmub_srv) 962 + return; 963 + 964 + if (!dc_dmub_srv->dmub) { 962 965 DC_LOG_ERROR("%s: invalid parameters.", __func__); 963 966 return; 964 967 } ··· 1085 1082 struct dmub_cursor_attributes_cfg *pl_A, const uint8_t p_idx, 1086 1083 const struct hubp *hubp, const struct dpp *dpp) 1087 1084 { 1085 + (void)p_idx; 1088 1086 /* Hubp */ 1089 1087 pl_A->aHubp.SURFACE_ADDR_HIGH = hubp->att.SURFACE_ADDR_HIGH; 1090 1088 pl_A->aHubp.SURFACE_ADDR = hubp->att.SURFACE_ADDR; ··· 1167 1163 { 1168 1164 struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv; 1169 1165 1170 - if (!dc_dmub_srv || !dc_dmub_srv->dmub) { 1166 + if (!dc_dmub_srv) 1167 + return; 1168 + 1169 + if (!dc_dmub_srv->dmub) { 1171 1170 DC_LOG_ERROR("%s: invalid parameters.", __func__); 1172 1171 return; 1173 1172 }
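The split NULL check in dc_dmub_srv.c above distinguishes two cases the old combined test conflated: a service that was never created (return quietly, as on configs without DMUB) and a created service whose firmware object is missing (an inconsistency worth logging). A standalone model, with printf standing in for DC_LOG_ERROR:

#include <stddef.h>
#include <stdio.h>

struct model_srv {
	void *dmub;
};

static void model_enter(struct model_srv *srv)
{
	if (!srv)
		return; /* expected: feature not present, stay quiet */

	if (!srv->dmub) {
		printf("invalid parameters\n"); /* stand-in for DC_LOG_ERROR */
		return;
	}

	/* ... proceed using srv->dmub ... */
}

int main(void)
{
	struct model_srv missing_fw = { NULL };

	model_enter(NULL);        /* quiet: service never created */
	model_enter(&missing_fw); /* logs: created but inconsistent */
	return 0;
}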
-1
drivers/gpu/drm/amd/display/dc/dc_dsc.h
··· 52 52 uint32_t max_target_bpp; 53 53 uint32_t min_target_bpp; 54 54 bool enable_dsc_when_not_needed; 55 - bool ycbcr422_simple; 56 55 }; 57 56 58 57 struct dc_dsc_config_options {
+1
drivers/gpu/drm/amd/display/dc/dc_helper.c
··· 108 108 uint8_t shift1, uint32_t mask1, uint32_t field_value1, 109 109 va_list ap) 110 110 { 111 + (void)addr; 111 112 uint32_t shift, mask, field_value; 112 113 int i = 1; 113 114
+2
drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c
··· 165 165 int otg_inst, 166 166 int dp_hpo_inst) 167 167 { 168 + (void)dp_hpo_inst; 168 169 if (src == REFCLK) 169 170 dccg31_disable_dpstreamclk(dccg, otg_inst); 170 171 else ··· 645 644 unsigned int xtalin_freq_inKhz, 646 645 unsigned int *dccg_ref_freq_inKhz) 647 646 { 647 + (void)dccg; 648 648 /* 649 649 * Assume refclk is sourced from xtalin 650 650 * expect 24MHz
+1
drivers/gpu/drm/amd/display/dc/dccg/dcn32/dcn32_dccg.c
··· 265 265 unsigned int xtalin_freq_inKhz, 266 266 unsigned int *dccg_ref_freq_inKhz) 267 267 { 268 + (void)dccg; 268 269 /* 269 270 * Assume refclk is sourced from xtalin 270 271 * expect 100MHz
+2
drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
··· 558 558 static int 559 559 dccg35_is_symclk32_se_src_functional_le_new(struct dccg *dccg, int symclk_32_se_inst, int symclk_32_le_inst) 560 560 { 561 + (void)symclk_32_se_inst; 561 562 uint32_t en; 562 563 uint32_t src_sel; 563 564 ··· 2374 2373 uint32_t stream_enc_inst, 2375 2374 uint32_t link_enc_inst) 2376 2375 { 2376 + (void)link_enc_inst; 2377 2377 dccg35_disable_symclk_fe_new(dccg, stream_enc_inst); 2378 2378 2379 2379 /* DMU PHY sequence switches SYMCLK_BE (link_enc_inst) to ref clock once PHY is turned off */
+4 -4
drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
··· 161 161 enum pixel_rate_div tmds_div, 162 162 enum pixel_rate_div unused) 163 163 { 164 + (void)unused; 164 165 struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); 165 166 uint32_t cur_tmds_div = PIXEL_RATE_DIV_NA; 166 167 uint32_t dp_dto_int; ··· 354 353 unsigned int xtalin_freq_inKhz, 355 354 unsigned int *dccg_ref_freq_inKhz) 356 355 { 356 + (void)dccg; 357 357 /* 358 358 * Assume refclk is sourced from xtalin 359 359 * expect 100MHz ··· 528 526 BREAK_TO_DEBUGGER(); 529 527 return; 530 528 } 531 - if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) 532 - REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, 533 - DPSTREAMCLK_GATE_DISABLE, 1, 534 - DPSTREAMCLK_ROOT_GATE_DISABLE, 1); 535 529 } 536 530 537 531 void dccg401_disable_dpstreamclk(struct dccg *dccg, int dp_hpo_inst) ··· 723 725 724 726 void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst, uint32_t num_slices_h) 725 727 { 728 + (void)num_slices_h; 726 729 struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); 727 730 728 731 switch (inst) { ··· 841 842 842 843 void dccg401_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst) 843 844 { 845 + (void)link_enc_inst; 844 846 struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); 845 847 846 848 switch (stream_enc_inst) {
+1
drivers/gpu/drm/amd/display/dc/dccg/dcn42/dcn42_dccg.c
··· 186 186 enum pixel_rate_div tmds_div, 187 187 enum pixel_rate_div unused) 188 188 { 189 + (void)unused; 189 190 struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); 190 191 uint32_t cur_tmds_div = PIXEL_RATE_DIV_NA; 191 192 uint32_t dp_dto_int;
+1
drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
··· 57 57 58 58 static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id, uint32_t panel_inst) 59 59 { 60 + (void)panel_inst; 60 61 struct dce_abm *abm_dce = TO_DCE_ABM(abm); 61 62 uint32_t rampingBoundary = 0xFFFF; 62 63
+3
drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
··· 350 350 uint32_t av_stream_map_lane_count, 351 351 uint32_t audio_sdp_overhead) 352 352 { 353 + (void)channel_count; 353 354 /* DP spec recommends between 1.05 to 1.1 safety margin to prevent sample under-run */ 354 355 struct fixed31_32 audio_sdp_margin = dc_fixpt_from_fraction(110, 100); 355 356 struct fixed31_32 horizontal_line_freq_khz = dc_fixpt_from_fraction( ··· 1028 1027 uint32_t actual_pixel_clock_100Hz, 1029 1028 struct azalia_clock_info *azalia_clock_info) 1030 1029 { 1030 + (void)crtc_pixel_clock_100hz; 1031 1031 /* audio_dto_phase= 24 * 10,000; 1032 1032 * 24MHz in [100Hz] units */ 1033 1033 azalia_clock_info->audio_dto_phase = ··· 1045 1043 const struct audio_pll_info *pll_info, 1046 1044 struct azalia_clock_info *azalia_clock_info) 1047 1045 { 1046 + (void)requested_pixel_clock_100Hz; 1048 1047 /* Reported dpDtoSourceClockInkhz value for 1049 1048 * DCE8 already adjusted for SS, do not need any 1050 1049 * adjustment here anymore
+7 -3
drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
··· 539 539 struct pll_settings *pll_settings, 540 540 struct pixel_clk_params *pix_clk_params) 541 541 { 542 + (void)clk_src; 542 543 uint32_t actual_pixel_clock_100hz; 543 544 544 545 actual_pixel_clock_100hz = pix_clk_params->requested_pix_clk_100hz; ··· 611 610 || pix_clk_params->requested_pix_clk_100hz == 0) { 612 611 DC_LOG_ERROR( 613 612 "%s: Invalid parameters!!\n", __func__); 614 - return -1; 613 + return (uint32_t)-1; 615 614 } 616 615 617 616 memset(pll_settings, 0, sizeof(*pll_settings)); ··· 622 621 pll_settings->calculated_pix_clk_100hz = clk_src->ext_clk_khz * 10; 623 622 pll_settings->actual_pix_clk_100hz = 624 623 pix_clk_params->requested_pix_clk_100hz; 625 - return -1; 624 + return (uint32_t)-1; 626 625 } 627 626 628 627 dce112_get_pix_clk_dividers_helper(clk_src, ··· 848 847 enum dp_link_encoding encoding, 849 848 struct pll_settings *pll_settings) 850 849 { 850 + (void)encoding; 851 851 struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source); 852 852 struct bp_pixel_clock_parameters bp_pc_params = {0}; 853 853 ··· 923 921 enum dp_link_encoding encoding, 924 922 struct pll_settings *pll_settings) 925 923 { 924 + (void)encoding; 926 925 struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source); 927 926 struct bp_pixel_clock_parameters bp_pc_params = {0}; 928 927 ··· 1073 1070 enum dp_link_encoding encoding, 1074 1071 struct pll_settings *pll_settings) 1075 1072 { 1073 + (void)encoding; 1076 1074 struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source); 1077 1075 unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0; 1078 1076 const struct pixel_rate_range_table_entry *e = ··· 1380 1376 || pix_clk_params->requested_pix_clk_100hz == 0) { 1381 1377 DC_LOG_ERROR( 1382 1378 "%s: Invalid parameters!!\n", __func__); 1383 - return -1; 1379 + return UINT_MAX; 1384 1380 } 1385 1381 1386 1382 memset(pll_settings, 0, sizeof(*pll_settings));
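These pixel-clock helpers return uint32_t, so the old bare return -1; relied on implicit signed-to-unsigned conversion; the hunk spells the all-ones sentinel explicitly, as (uint32_t)-1 twice and UINT_MAX once. The spellings are interchangeable here because C defines the conversion modulo 2^32 and unsigned int is 32 bits on the platforms the kernel targets. A standalone check:

        #include <assert.h>
        #include <limits.h>
        #include <stdint.h>

        int main(void)
        {
                uint32_t sentinel = (uint32_t)-1;  /* conversion is modulo 2^32 */

                assert(sentinel == UINT32_MAX);
                assert(sentinel == 4294967295u);
                assert(sentinel == UINT_MAX);      /* holds where int is 32 bits */
                return 0;
        }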
+1
drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
··· 71 71 72 72 static bool dce_dmcu_init(struct dmcu *dmcu) 73 73 { 74 + (void)dmcu; 74 75 // Do nothing 75 76 return true; 76 77 }
+1
drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c
··· 31 31 size_t slave_address 32 32 ) 33 33 { 34 + (void)pool; 34 35 struct dc *dc = ddc->ctx->dc; 35 36 struct dc_bios *dcb = dc->ctx->dc_bios; 36 37 struct graphics_object_id id = {0};
+2
drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
··· 69 69 struct dce_i2c_hw *dce_i2c_hw, 70 70 uint8_t *returned_bytes) 71 71 { 72 + (void)returned_bytes; 72 73 uint32_t i2c_sw_status = 0; 73 74 uint32_t value = 74 75 REG_GET(DC_I2C_SW_STATUS, DC_I2C_SW_STATUS, &i2c_sw_status); ··· 632 631 struct i2c_command *cmd, 633 632 struct dce_i2c_hw *dce_i2c_hw) 634 633 { 634 + (void)ddc; 635 635 uint8_t index_of_payload = 0; 636 636 bool result; 637 637
+3
drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
··· 67 67 struct resource_pool *pool, 68 68 struct dce_i2c_sw *dce_i2c_sw) 69 69 { 70 + (void)pool; 70 71 dal_ddc_close(dce_i2c_sw->ddc); 71 72 dce_i2c_sw->ddc = NULL; 72 73 } ··· 77 76 struct ddc *ddc, 78 77 uint16_t clock_delay_div_4) 79 78 { 79 + (void)ctx; 80 80 uint32_t scl_retry = 0; 81 81 uint32_t scl_retry_max = I2C_SW_TIMEOUT_DELAY / clock_delay_div_4; 82 82 ··· 471 469 struct i2c_command *cmd, 472 470 struct dce_i2c_sw *dce_i2c_sw) 473 471 { 472 + (void)ddc; 474 473 uint8_t index_of_payload = 0; 475 474 bool result; 476 475
+1
drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c
··· 43 43 const struct dc_cursor_position *position, 44 44 const struct dc_cursor_mi_param *param) 45 45 { 46 + (void)param; 46 47 struct dce_ipp *ipp_dce = TO_DCE_IPP(ipp); 47 48 48 49 /* lock cursor registers */
+4
drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
··· 317 317 struct dce_watermarks urgent, 318 318 uint32_t total_dest_line_time_ns) 319 319 { 320 + (void)stutter_enter; 320 321 struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi); 321 322 uint32_t stutter_en = mi->ctx->dc->debug.disable_stutter ? 0 : 1; 322 323 ··· 371 370 struct dce_watermarks urgent, 372 371 uint32_t total_dest_line_time_ns) 373 372 { 373 + (void)stutter_entry; 374 374 struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi); 375 375 uint32_t stutter_en = mi->ctx->dc->debug.disable_stutter ? 0 : 1; 376 376 ··· 658 656 struct dc_plane_dcc_param *dcc, 659 657 bool horizontal_mirror) 660 658 { 659 + (void)dcc; 660 + (void)horizontal_mirror; 661 661 struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi); 662 662 REG_UPDATE(GRPH_ENABLE, GRPH_ENABLE, 1); 663 663
+1
drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
··· 600 600 enum dc_color_depth color_dpth, 601 601 enum signal_type signal) 602 602 { 603 + (void)color_sp; 603 604 struct dce110_opp *opp110 = TO_DCE110_OPP(opp); 604 605 605 606 REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL,
+4
drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
··· 271 271 bool use_vsc_sdp_for_colorimetry, 272 272 uint32_t enable_sdp_splitting) 273 273 { 274 + (void)use_vsc_sdp_for_colorimetry; 275 + (void)enable_sdp_splitting; 274 276 uint32_t h_active_start; 275 277 uint32_t v_active_start; 276 278 uint32_t misc0 = 0; ··· 903 901 struct dc_link *link, 904 902 struct stream_encoder *enc) 905 903 { 904 + (void)link; 906 905 struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); 907 906 uint32_t reg1 = 0; 908 907 uint32_t max_retries = DP_BLANK_MAX_RETRY * 10; ··· 954 951 struct stream_encoder *enc, 955 952 const struct encoder_unblank_param *param) 956 953 { 954 + (void)link; 957 955 struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); 958 956 959 957 if (param->link_settings.link_rate != LINK_RATE_UNKNOWN) {
+2
drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
··· 282 282 const struct scaler_data *data, 283 283 struct scl_ratios_inits *inits) 284 284 { 285 + (void)xfm_dce; 285 286 struct fixed31_32 h_init; 286 287 struct fixed31_32 v_init; 287 288 ··· 1241 1240 const struct out_csc_color_matrix *tbl_entry, 1242 1241 enum grph_color_adjust_option options) 1243 1242 { 1243 + (void)options; 1244 1244 { 1245 1245 REG_SET_2(OUTPUT_CSC_C11_C12, 0, 1246 1246 OUTPUT_CSC_C11, tbl_entry->regval[0],
+2 -2
drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
··· 41 41 { 42 42 struct dc_context *dc = abm->ctx; 43 43 struct dc_link *edp_links[MAX_NUM_EDP]; 44 - int i; 45 - int edp_num; 44 + unsigned int i, edp_num; 46 45 unsigned int ret = ABM_FEATURE_NO_SUPPORT; 47 46 48 47 dc_get_edp_links(dc->dc, edp_links, &edp_num); ··· 173 174 unsigned int controller_id, 174 175 unsigned int panel_inst) 175 176 { 177 + (void)controller_id; 176 178 bool ret = false; 177 179 unsigned int feature_support; 178 180
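This hunk, like the dmub_hw_lock_mgr.c one below, is fallout from the dc.h change earlier in this series that made dc_get_edp_links() take unsigned int *edp_num: int * and unsigned int * are not compatible pointer types in C, so every caller's local flips sign in the same patch to keep the code warning-clean and honest about the value's range. A generic illustration of why the callers move in lockstep:

        /* Passing a signed local's address where unsigned int * is expected
         * draws a -Wpointer-sign warning in a default userspace build. */
        static void get_count(unsigned int *out)
        {
                *out = 1;
        }

        static unsigned int caller(void)
        {
                unsigned int count;     /* was int before the API change */

                get_count(&count);      /* ok; an int local here would warn */
                return count;
        }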
+1
drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
··· 188 188 189 189 bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int stream_inst) 190 190 { 191 + (void)stream_inst; 191 192 union dmub_rb_cmd cmd; 192 193 struct dc_context *dc = abm->ctx; 193 194 uint8_t panel_mask = 0x01 << panel_inst;
+1 -1
drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
··· 82 82 83 83 if (link->psr_settings.psr_version == DC_PSR_VERSION_1) { 84 84 struct dc_link *edp_links[MAX_NUM_EDP]; 85 - int edp_num; 85 + unsigned int edp_num; 86 86 87 87 dc_get_edp_links(dc, edp_links, &edp_num); 88 88 if (edp_num == 1)
+1
drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
··· 216 216 uint8_t panel_inst, 217 217 uint16_t frame_skip_number) 218 218 { 219 + (void)panel_inst; 219 220 union dmub_rb_cmd cmd; 220 221 struct dc_context *dc = dmub->ctx; 221 222 struct dmub_rb_cmd_replay_set_coasting_vtotal *pCmd = NULL;
+10
drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
··· 40 40 struct dce_mem_input *mem_input110, 41 41 bool immediate) 42 42 { 43 + (void)immediate; 43 44 uint32_t value = 0; 44 45 45 46 value = dm_read_reg( ··· 166 165 const struct dc_tiling_info *info, 167 166 const enum surface_pixel_format pixel_format) 168 167 { 168 + (void)pixel_format; 169 169 uint32_t value = 0; 170 170 171 171 set_reg_field_value(value, info->gfx8.num_banks, ··· 644 642 struct dc_plane_dcc_param *dcc, 645 643 bool horizotal_mirror) 646 644 { 645 + (void)dcc; 646 + (void)horizotal_mirror; 647 647 struct dce_mem_input *mem_input110 = TO_DCE_MEM_INPUT(mem_input); 648 648 649 649 enable(mem_input110); ··· 931 927 struct dce_watermarks urgent, 932 928 uint32_t total_dest_line_time_ns) 933 929 { 930 + (void)stutter_enter; 934 931 program_urgency_watermark_l( 935 932 mem_input->ctx, 936 933 urgent, ··· 975 970 uint32_t pix_clk_khz,/* for current stream */ 976 971 uint32_t total_stream_num) 977 972 { 973 + (void)h_total; 974 + (void)v_total; 975 + (void)total_stream_num; 978 976 uint32_t addr; 979 977 uint32_t value; 980 978 uint32_t pix_dur; ··· 1017 1009 struct mem_input *mi, 1018 1010 uint32_t total_stream_num) 1019 1011 { 1012 + (void)mi; 1013 + (void)total_stream_num; 1020 1014 } 1021 1015 1022 1016 static const struct mem_input_funcs dce110_mem_input_v_funcs = {
+1
drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c
··· 110 110 const struct out_csc_color_matrix *tbl_entry, 111 111 enum grph_color_adjust_option options) 112 112 { 113 + (void)options; 113 114 struct dc_context *ctx = xfm_dce->base.ctx; 114 115 uint32_t cntl_value = dm_read_reg(ctx, mmCOL_MAN_OUTPUT_CSC_CONTROL); 115 116 bool use_set_a = (get_reg_field_value(cntl_value,
+2
drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c
··· 551 551 struct transform *xfm, 552 552 enum opp_regamma mode) 553 553 { 554 + (void)xfm; 555 + (void)mode; 554 556 // TODO: need to implement the function 555 557 }
+9
drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
··· 66 66 struct timing_generator *tg, 67 67 struct dc_crtc_timing *timing) 68 68 { 69 + (void)tg; 69 70 if (timing->flags.INTERLACE == 1) { 70 71 if (timing->v_front_porch < 2) 71 72 timing->v_front_porch = 2; ··· 1116 1115 const struct dc_crtc_timing *timing, 1117 1116 enum signal_type signal) 1118 1117 { 1118 + (void)signal; 1119 1119 uint32_t h_blank; 1120 1120 uint32_t h_back_porch, hsync_offset, h_sync_start; 1121 1121 ··· 1492 1490 struct timing_generator *tg, 1493 1491 int source_tg_inst) 1494 1492 { 1493 + (void)source_tg_inst; 1495 1494 uint32_t value; 1496 1495 uint32_t rising_edge = 0; 1497 1496 uint32_t falling_edge = 0; ··· 1962 1959 const enum signal_type signal, 1963 1960 bool use_vbios) 1964 1961 { 1962 + (void)vready_offset; 1963 + (void)vstartup_start; 1964 + (void)vupdate_offset; 1965 + (void)vupdate_width; 1966 + (void)pstate_keepout; 1967 + (void)signal; 1965 1968 if (use_vbios) 1966 1969 dce110_timing_generator_program_timing_generator(tg, timing); 1967 1970 else
+9
drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c
··· 442 442 const enum signal_type signal, 443 443 bool use_vbios) 444 444 { 445 + (void)vready_offset; 446 + (void)vstartup_start; 447 + (void)vupdate_offset; 448 + (void)vupdate_width; 449 + (void)pstate_keepout; 450 + (void)signal; 445 451 if (use_vbios) 446 452 dce110_timing_generator_program_timing_generator(tg, timing); 447 453 else ··· 627 621 struct timing_generator *tg, 628 622 const struct dcp_gsl_params *gsl_params) 629 623 { 624 + (void)gsl_params; 630 625 DC_LOG_ERROR("Timing Sync not supported on underlay pipe\n"); 631 626 return; 632 627 } ··· 636 629 struct timing_generator *tg, 637 630 int source_tg_inst) 638 631 { 632 + (void)source_tg_inst; 639 633 DC_LOG_ERROR("Timing Sync not supported on underlay pipe\n"); 640 634 return; 641 635 } ··· 658 650 static void dce110_timing_generator_v_disable_vga( 659 651 struct timing_generator *tg) 660 652 { 653 + (void)tg; 661 654 return; 662 655 } 663 656
+6
drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
··· 371 371 struct rect *luma_viewport, 372 372 struct rect *chroma_viewport) 373 373 { 374 + (void)xfm_dce; 375 + (void)luma_viewport; 376 + (void)chroma_viewport; 374 377 inits->h_int_scale_ratio_luma = 375 378 dc_fixpt_u2d19(data->ratios.horz) << 5; 376 379 inits->v_int_scale_ratio_luma = ··· 622 619 struct transform *xfm, 623 620 const struct xfm_grph_csc_adjustment *adjust) 624 621 { 622 + (void)xfm; 623 + (void)adjust; 625 624 /* DO NOTHING*/ 626 625 } 627 626 ··· 632 627 enum lb_pixel_depth depth, 633 628 const struct bit_depth_reduction_params *bit_depth_params) 634 629 { 630 + (void)bit_depth_params; 635 631 struct dce_transform *xfm_dce = TO_DCE_TRANSFORM(xfm); 636 632 int pixel_depth = 0; 637 633 int expan_mode = 0;
+1
drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c
··· 284 284 struct dce112_compressor *cp110, 285 285 uint32_t pixels) 286 286 { 287 + (void)cp110; 287 288 return 256 * ((pixels + 255) / 256); 288 289 } 289 290
+7
drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
··· 304 304 struct timing_generator *tg, 305 305 int source) 306 306 { 307 + (void)source; 307 308 enum trigger_source_select trig_src_select = TRIGGER_SOURCE_SELECT_LOGIC_ZERO; 308 309 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); 309 310 uint32_t rising_edge = 0; ··· 702 701 const enum signal_type signal, 703 702 bool use_vbios) 704 703 { 704 + (void)vready_offset; 705 + (void)vstartup_start; 706 + (void)vupdate_offset; 707 + (void)vupdate_width; 708 + (void)pstate_keepout; 709 + (void)signal; 705 710 if (use_vbios) 706 711 dce110_timing_generator_program_timing_generator(tg, timing); 707 712 else
+6
drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c
··· 115 115 const enum signal_type signal, 116 116 bool use_vbios) 117 117 { 118 + (void)vready_offset; 119 + (void)vstartup_start; 120 + (void)vupdate_offset; 121 + (void)vupdate_width; 122 + (void)pstate_keepout; 123 + (void)signal; 118 124 if (!use_vbios) 119 125 program_pix_dur(tg, timing->pix_clk_100hz); 120 126
+4 -4
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
··· 381 381 } 382 382 383 383 for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++) 384 - seg_distr[i] = -1; 384 + seg_distr[i] = (uint32_t)-1; 385 385 386 386 for (k = 0; k < MAX_REGIONS_NUMBER; k++) { 387 - if (seg_distr[k] != -1) 387 + if (seg_distr[k] != (uint32_t)-1) 388 388 hw_points += (1 << seg_distr[k]); 389 389 } 390 390 ··· 565 565 566 566 567 567 for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++) 568 - seg_distr[i] = -1; 568 + seg_distr[i] = (uint32_t)-1; 569 569 /* 12 segments 570 570 * segments are from 2^-12 to 0 571 571 */ ··· 573 573 seg_distr[i] = 4; 574 574 575 575 for (k = 0; k < MAX_REGIONS_NUMBER; k++) { 576 - if (seg_distr[k] != -1) 576 + if (seg_distr[k] != (uint32_t)-1) 577 577 hw_points += (1 << seg_distr[k]); 578 578 } 579 579
+2
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c
··· 44 44 45 45 static bool dwb1_get_caps(struct dwbc *dwbc, struct dwb_caps *caps) 46 46 { 47 + (void)dwbc; 47 48 if (caps) { 48 49 caps->adapter_id = 0; /* we only support 1 adapter currently */ 49 50 caps->hw_version = DCN_VERSION_1_0; ··· 64 63 65 64 static bool dwb1_enable(struct dwbc *dwbc, struct dc_dwb_params *params) 66 65 { 66 + (void)params; 67 67 struct dcn10_dwbc *dwbc10 = TO_DCN10_DWBC(dwbc); 68 68 69 69 /* disable first. */
+2 -2
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c
··· 746 746 src_width, dest_width); 747 747 748 748 if (dc_fixpt_floor(tmp_h_ratio_luma) == 8) 749 - h_ratio_luma = -1; 749 + h_ratio_luma = (uint32_t)-1; 750 750 else 751 751 h_ratio_luma = dc_fixpt_u3d19(tmp_h_ratio_luma) << 5; 752 752 ··· 824 824 src_height, dest_height); 825 825 826 826 if (dc_fixpt_floor(tmp_v_ratio_luma) == 8) 827 - v_ratio_luma = -1; 827 + v_ratio_luma = (uint32_t)-1; 828 828 else 829 829 v_ratio_luma = dc_fixpt_u3d19(tmp_v_ratio_luma) << 5; 830 830
+2 -2
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c
··· 159 159 } 160 160 161 161 for (i = region_end - region_start; i < MAX_REGIONS_NUMBER ; i++) 162 - seg_distr[i] = -1; 162 + seg_distr[i] = (uint32_t)-1; 163 163 164 164 for (k = 0; k < MAX_REGIONS_NUMBER; k++) { 165 - if (seg_distr[k] != -1) 165 + if (seg_distr[k] != (uint32_t)-1) 166 166 hw_points += (1 << seg_distr[k]); 167 167 } 168 168
+1
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c
··· 77 77 unsigned int az_inst, 78 78 struct audio_info *audio_info) 79 79 { 80 + (void)az_inst; 80 81 struct dcn31_apg *apg31 = DCN31_APG_FROM_APG(apg); 81 82 82 83 ASSERT(audio_info);
+2
drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c
··· 249 249 bool use_vsc_sdp_for_colorimetry, 250 250 uint32_t enable_sdp_splitting) 251 251 { 252 + (void)enable_sdp_splitting; 252 253 uint32_t h_active_start; 253 254 uint32_t v_active_start; 254 255 uint32_t misc0 = 0; ··· 784 783 const uint8_t *custom_sdp_message, 785 784 unsigned int sdp_message_size) 786 785 { 786 + (void)sdp_message_size; 787 787 struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); 788 788 uint32_t value = 0; 789 789
+2
drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
··· 394 394 uint32_t dsc_bytes_per_pixel, 395 395 uint32_t dsc_slice_width) 396 396 { 397 + (void)dsc_bytes_per_pixel; 398 + (void)dsc_slice_width; 397 399 struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); 398 400 399 401 REG_UPDATE(DP_DSC_CNTL, DP_DSC_MODE, dsc_mode == OPTC_DSC_DISABLED ? 0 : 1);
+2
drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c
··· 356 356 uint32_t dsc_bytes_per_pixel, 357 357 uint32_t dsc_slice_width) 358 358 { 359 + (void)dsc_bytes_per_pixel; 360 + (void)dsc_slice_width; 359 361 struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); 360 362 361 363 REG_UPDATE(DP_DSC_CNTL, DP_DSC_MODE, dsc_mode == OPTC_DSC_DISABLED ? 0 : 1);
+1
drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c
··· 369 369 uint32_t stream_enc_inst, 370 370 uint32_t link_enc_inst) 371 371 { 372 + (void)stream_enc_inst; 372 373 struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); 373 374 374 375 ASSERT(stream_enc_inst < 5 && link_enc_inst < 5);
+3
drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
··· 57 57 struct stream_encoder *enc, 58 58 bool odm_combine) 59 59 { 60 + (void)enc; 61 + (void)odm_combine; 60 62 } 61 63 62 64 /* setup stream encoder in dvi mode */ ··· 712 710 uint32_t stream_enc_inst, 713 711 uint32_t link_enc_inst) 714 712 { 713 + (void)stream_enc_inst; 715 714 struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); 716 715 717 716 REG_UPDATE(STREAM_MAPPER_CONTROL,
+79 -32
drivers/gpu/drm/amd/display/dc/dio/virtual/virtual_stream_encoder.c
··· 31 31 struct dc_crtc_timing *crtc_timing, 32 32 enum dc_color_space output_color_space, 33 33 bool use_vsc_sdp_for_colorimetry, 34 - uint32_t enable_sdp_splitting) {} 34 + uint32_t enable_sdp_splitting) { 35 + (void)enc; 36 + (void)crtc_timing; 37 + (void)output_color_space; 38 + (void)use_vsc_sdp_for_colorimetry; 39 + (void)enable_sdp_splitting; 40 + } 35 41 36 42 static void virtual_stream_encoder_hdmi_set_stream_attribute( 37 43 struct stream_encoder *enc, 38 44 struct dc_crtc_timing *crtc_timing, 39 45 int actual_pix_clk_khz, 40 - bool enable_audio) {} 46 + bool enable_audio) { 47 + (void)enc; 48 + (void)crtc_timing; 49 + (void)actual_pix_clk_khz; 50 + (void)enable_audio; 51 + } 41 52 42 53 static void virtual_stream_encoder_dvi_set_stream_attribute( 43 54 struct stream_encoder *enc, 44 55 struct dc_crtc_timing *crtc_timing, 45 - bool is_dual_link) {} 56 + bool is_dual_link) { 57 + (void)enc; 58 + (void)crtc_timing; 59 + (void)is_dual_link; 60 + } 46 61 47 62 static void virtual_stream_encoder_set_throttled_vcp_size( 48 63 struct stream_encoder *enc, 49 - struct fixed31_32 avg_time_slots_per_mtp) 50 - {} 64 + struct fixed31_32 avg_time_slots_per_mtp) { 65 + (void)enc; 66 + (void)avg_time_slots_per_mtp; 67 + } 51 68 52 69 static void virtual_stream_encoder_update_hdmi_info_packets( 53 70 struct stream_encoder *enc, 54 - const struct encoder_info_frame *info_frame) {} 71 + const struct encoder_info_frame *info_frame) { 72 + (void)enc; 73 + (void)info_frame; 74 + } 55 75 56 76 static void virtual_stream_encoder_stop_hdmi_info_packets( 57 - struct stream_encoder *enc) {} 77 + struct stream_encoder *enc) { 78 + (void)enc; 79 + } 58 80 59 81 static void virtual_stream_encoder_set_avmute( 60 - struct stream_encoder *enc, 61 - bool enable) {} 82 + struct stream_encoder *enc, bool enable) { 83 + (void)enc; 84 + (void)enable; 85 + } 62 86 static void virtual_stream_encoder_update_dp_info_packets( 63 87 struct stream_encoder *enc, 64 - const struct encoder_info_frame *info_frame) {} 88 + const struct encoder_info_frame *info_frame) { 89 + (void)enc; 90 + (void)info_frame; 91 + } 65 92 66 93 static void virtual_stream_encoder_stop_dp_info_packets( 67 - struct stream_encoder *enc) {} 94 + struct stream_encoder *enc) { 95 + (void)enc; 96 + } 68 97 69 98 static void virtual_stream_encoder_dp_blank( 70 99 struct dc_link *link, 71 - struct stream_encoder *enc) {} 100 + struct stream_encoder *enc) { 101 + (void)link; 102 + (void)enc; 103 + } 72 104 73 105 static void virtual_stream_encoder_dp_unblank( 74 106 struct dc_link *link, 75 107 struct stream_encoder *enc, 76 - const struct encoder_unblank_param *param) {} 108 + const struct encoder_unblank_param *param) { 109 + (void)enc; 110 + (void)link; 111 + (void)param; 112 + } 77 113 78 114 static void virtual_audio_mute_control( 79 - struct stream_encoder *enc, 80 - bool mute) {} 115 + struct stream_encoder *enc, bool mute) { 116 + (void)enc; 117 + (void)mute; 118 + } 81 119 82 120 static void virtual_stream_encoder_reset_hdmi_stream_attribute( 83 - struct stream_encoder *enc) 84 - {} 121 + struct stream_encoder *enc) 122 + { 123 + (void)enc; 124 + } 85 125 86 126 static void virtual_enc_dp_set_odm_combine( 87 - struct stream_encoder *enc, 88 - bool odm_combine) 89 - {} 127 + struct stream_encoder *enc, bool odm_combine) { 128 + (void)enc; 129 + (void)odm_combine; 130 + } 90 131 91 132 static void virtual_dig_connect_to_otg( 92 - struct stream_encoder *enc, 93 - int tg_inst) 94 - {} 133 + struct stream_encoder *enc, int tg_inst) { 134 + (void)enc; 135 + (void)tg_inst; 136 + } 95 137 96 138 static void virtual_setup_stereo_sync( 97 - struct stream_encoder *enc, 98 - int tg_inst, 99 - bool enable) 100 - {} 139 + struct stream_encoder *enc, 140 + int tg_inst, bool enable) { 141 + (void)enc; 142 + (void)tg_inst; 143 + (void)enable; 144 + } 101 145 102 146 static void virtual_stream_encoder_set_dsc_pps_info_packet( 103 - struct stream_encoder *enc, 104 - bool enable, 105 - uint8_t *dsc_packed_pps, 106 - bool immediate_update) 107 - {} 147 + struct stream_encoder *enc, bool enable, uint8_t *dsc_packed_pps, 148 + bool immediate_update) 149 + { 150 + (void)enc; 151 + (void)enable; 152 + (void)dsc_packed_pps; 153 + (void)immediate_update; 154 + } 108 155 109 156 static const struct stream_encoder_funcs virtual_str_enc_funcs = { 110 157 .dp_set_odm_combine =
+1
drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
··· 525 525 struct pipe_ctx *primary_pipe, 526 526 struct pipe_ctx *secondary_pipe) 527 527 { 528 + (void)res_ctx; 528 529 int pipe_idx = secondary_pipe->pipe_idx; 529 530 530 531 if (!primary_pipe->plane_state)
+1 -1
drivers/gpu/drm/amd/display/dc/dml/dcn10/dcn10_fpu.c
··· 76 76 .line_buffer_size_bits = 589824, 77 77 .max_line_buffer_lines = 12, 78 78 .IsLineBufferBppFixed = 0, 79 - .LineBufferFixedBpp = -1, 79 + .LineBufferFixedBpp = (unsigned int)-1, 80 80 .writeback_luma_buffer_size_kbytes = 12, 81 81 .writeback_chroma_buffer_size_kbytes = 8, 82 82 .max_num_dpp = 4,
+2 -1
drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
··· 1316 1316 display_e2e_pipe_params_st *pipes, 1317 1317 enum dc_validate_mode validate_mode) 1318 1318 { 1319 + (void)validate_mode; 1319 1320 int pipe_cnt, i; 1320 1321 bool synchronized_vblank = true; 1321 1322 struct resource_context *res_ctx = &context->res_ctx; ··· 2336 2335 /*Unsafe due to current pipe merge and split logic*/ 2337 2336 ASSERT(context != dc->current_state); 2338 2337 2339 - out = dcn21_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, validate_mode); 2338 + out = dcn21_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, validate_mode, false); 2340 2339 2341 2340 if (pipe_cnt == 0) 2342 2341 goto validate_out;
+4
drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
··· 501 501 double *VUpdateWidthPix, 502 502 double *VReadyOffsetPix) 503 503 { 504 + (void)mode_lib; 505 + 504 506 bool MyError = false; 505 507 unsigned int DPPCycles, DISPCLKCycles; 506 508 double DSTTotalPixelsAfterScaler, TotalRepeaterDelayTime; ··· 880 878 unsigned int *dpte_row_height, 881 879 unsigned int *meta_row_height) 882 880 { 881 + (void)ViewportWidth; 883 882 unsigned int MetaRequestHeight; 884 883 unsigned int MetaRequestWidth; 885 884 unsigned int MetaSurfWidth; ··· 2956 2953 double *TInitXFill, 2957 2954 double *TslvChk) 2958 2955 { 2956 + (void)mode_lib; 2959 2957 double TSlvSetup, AvgfillRate, result; 2960 2958 2961 2959 *SrcActiveDrainRate = VRatio * SwathWidth * Bpp / LineTime;
+5
drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
··· 495 495 double *DSTYAfterScaler 496 496 ) 497 497 { 498 + (void)ReturnBW; 498 499 unsigned int DPPCycles, DISPCLKCycles; 499 500 double DataFabricLineDeliveryTimeLuma; 500 501 double DataFabricLineDeliveryTimeChroma; ··· 593 592 double *VUpdateWidthPix, 594 593 double *VReadyOffsetPix) 595 594 { 595 + (void)mode_lib; 596 + 596 597 bool MyError = false; 597 598 double TotalRepeaterDelayTime; 598 599 double Tdm, LineTime, Tsetup; ··· 941 938 unsigned int *dpte_row_height, 942 939 unsigned int *meta_row_height) 943 940 { 941 + (void)ViewportWidth; 944 942 unsigned int MetaRequestHeight; 945 943 unsigned int MetaRequestWidth; 946 944 unsigned int MetaSurfWidth; ··· 3030 3026 double *TInitXFill, 3031 3027 double *TslvChk) 3032 3028 { 3029 + (void)mode_lib; 3033 3030 double TSlvSetup, AvgfillRate, result; 3034 3031 3035 3032 *SrcActiveDrainRate = VRatio * SwathWidth * Bpp / LineTime;
+5
drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
··· 126 126 unsigned int delivery_width, 127 127 unsigned int req_per_swath_ub) 128 128 { 129 + (void)mode_lib; 129 130 double refcyc_per_delivery = 0.0; 130 131 131 132 if (vratio <= 1.0) { ··· 1539 1538 const bool ignore_viewport_pos, 1540 1539 const bool immediate_flip_support) 1541 1540 { 1541 + (void)vm_en; 1542 + (void)ignore_viewport_pos; 1543 + (void)immediate_flip_support; 1542 1544 display_rq_params_st rq_param = {0}; 1543 1545 display_dlg_sys_params_st dlg_sys_param = {0}; 1544 1546 ··· 1592 1588 unsigned int cur_width, 1593 1589 enum cursor_bpp cur_bpp) 1594 1590 { 1591 + (void)mode_lib; 1595 1592 unsigned int cur_src_width = cur_width; 1596 1593 unsigned int cur_req_size = 0; 1597 1594 unsigned int cur_req_width = 0;
+5
drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
··· 126 126 unsigned int delivery_width, 127 127 unsigned int req_per_swath_ub) 128 128 { 129 + (void)mode_lib; 129 130 double refcyc_per_delivery = 0.0; 130 131 131 132 if (vratio <= 1.0) { ··· 1540 1539 const bool ignore_viewport_pos, 1541 1540 const bool immediate_flip_support) 1542 1541 { 1542 + (void)vm_en; 1543 + (void)ignore_viewport_pos; 1544 + (void)immediate_flip_support; 1543 1545 display_rq_params_st rq_param = {0}; 1544 1546 display_dlg_sys_params_st dlg_sys_param = {0}; 1545 1547 ··· 1593 1589 unsigned int cur_width, 1594 1590 enum cursor_bpp cur_bpp) 1595 1591 { 1592 + (void)mode_lib; 1596 1593 unsigned int cur_src_width = cur_width; 1597 1594 unsigned int cur_req_size = 0; 1598 1595 unsigned int cur_req_width = 0;
+19
drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
··· 695 695 double *VUpdateWidthPix, 696 696 double *VReadyOffsetPix) 697 697 { 698 + (void)mode_lib; 699 + (void)XFCEnabled; 700 + 698 701 bool MyError = false; 699 702 unsigned int DPPCycles, DISPCLKCycles; 700 703 double DSTTotalPixelsAfterScaler, TotalRepeaterDelayTime; ··· 1293 1290 unsigned int *DPDE0BytesFrame, 1294 1291 unsigned int *MetaPTEBytesFrame) 1295 1292 { 1293 + (void)SourcePixelFormat; 1294 + (void)ViewportWidth; 1296 1295 unsigned int MPDEBytesFrame; 1297 1296 unsigned int DCCMetaSurfaceBytes; 1298 1297 unsigned int MacroTileSizeBytes; ··· 3045 3040 double *TInitXFill, 3046 3041 double *TslvChk) 3047 3042 { 3043 + (void)mode_lib; 3048 3044 double TSlvSetup, AvgfillRate, result; 3049 3045 3050 3046 *SrcActiveDrainRate = VRatio * SwathWidth * Bpp / LineTime; ··· 3193 3187 double *final_flip_bw, 3194 3188 bool *ImmediateFlipSupportedForPipe) 3195 3189 { 3190 + (void)mode_lib; 3196 3191 double min_row_time = 0.0; 3197 3192 unsigned int HostVMDynamicLevels; 3198 3193 double TimeForFetchingMetaPTEImmediateFlip; ··· 5301 5294 double *StutterEnterPlusExitWatermark, 5302 5295 double *MinActiveDRAMClockChangeLatencySupported) 5303 5296 { 5297 + (void)DPPCLK; 5298 + (void)SwathWidthSingleDPPY; 5299 + (void)DCFCLK; 5300 + (void)UrgentOutOfOrderReturn; 5301 + (void)ReturnBW; 5302 + (void)GPUVMEnable; 5303 + (void)dpte_group_bytes; 5304 + (void)MetaChunkSize; 5305 + 5304 5306 double EffectiveLBLatencyHidingY; 5305 5307 double EffectiveLBLatencyHidingC; 5306 5308 double DPPOutputBufferLinesY; ··· 5901 5885 double TimePerVMRequestVBlank[], 5902 5886 double TimePerVMRequestFlip[]) 5903 5887 { 5888 + (void)VRatioPrefetchY; 5889 + (void)VRatioPrefetchC; 5890 + 5904 5891 unsigned int meta_chunk_width; 5905 5892 unsigned int min_meta_chunk_width; 5906 5893 unsigned int meta_chunk_per_row_int;
+5
drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
··· 102 102 unsigned int delivery_width, 103 103 unsigned int req_per_swath_ub) 104 104 { 105 + (void)mode_lib; 105 106 double refcyc_per_delivery = 0.0; 106 107 107 108 if (vratio <= 1.0) { ··· 1648 1647 const bool ignore_viewport_pos, 1649 1648 const bool immediate_flip_support) 1650 1649 { 1650 + (void)vm_en; 1651 + (void)ignore_viewport_pos; 1652 + (void)immediate_flip_support; 1651 1653 display_rq_params_st rq_param = {0}; 1652 1654 display_dlg_sys_params_st dlg_sys_param = {0}; 1653 1655 ··· 1706 1702 unsigned int cur_width, 1707 1703 enum cursor_bpp cur_bpp) 1708 1704 { 1705 + (void)mode_lib; 1709 1706 unsigned int cur_src_width = cur_width; 1710 1707 unsigned int cur_req_size = 0; 1711 1708 unsigned int cur_req_width = 0;
+2
drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
··· 571 571 unsigned int *dcfclk_mhz, 572 572 unsigned int *dram_speed_mts) 573 573 { 574 + (void)bw_params; 574 575 unsigned int i; 575 576 576 577 dc_assert_fp_enabled(); ··· 721 720 722 721 void patch_dcn30_soc_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *dcn3_0_ip) 723 722 { 723 + (void)dcn3_0_ip; 724 724 dc_assert_fp_enabled(); 725 725 726 726 if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
+23
drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
··· 778 778 double *RequiredPrefetchPixDataBWChroma, 779 779 bool *NotEnoughTimeForDynamicMetadata) 780 780 { 781 + (void)SwathWidthY; 782 + (void)SwathWidthC; 781 783 struct vba_vars_st *v = &mode_lib->vba; 782 784 double DPPCLKDelaySubtotalPlusCNVCFormater = v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater; 783 785 bool MyError = false; ··· 1235 1233 unsigned int *IndependentBlockLuma, 1236 1234 unsigned int *IndependentBlockChroma) 1237 1235 { 1236 + (void)SurfaceWidthChroma; 1237 + (void)SurfaceHeightChroma; 1238 + (void)BytePerPixelDETY; 1239 + (void)BytePerPixelDETC; 1238 1240 int yuv420 = 0; 1239 1241 int horz_div_l = 0; 1240 1242 int horz_div_c = 0; ··· 1601 1595 unsigned int *DPDE0BytesFrame, 1602 1596 unsigned int *MetaPTEBytesFrame) 1603 1597 { 1598 + (void)SourcePixelFormat; 1604 1599 unsigned int MPDEBytesFrame = 0; 1605 1600 unsigned int DCCMetaSurfaceBytes = 0; 1606 1601 unsigned int MacroTileSizeBytes = 0; ··· 3075 3068 unsigned int HTotal, 3076 3069 unsigned int WritebackLineBufferSize) 3077 3070 { 3071 + (void)WritebackPixelFormat; 3072 + (void)WritebackVRatio; 3078 3073 double DISPCLK_H = 0, DISPCLK_V = 0, DISPCLK_HB = 0; 3079 3074 3080 3075 DISPCLK_H = PixelClock * dml_ceil(WritebackHTaps / 8.0, 1) / WritebackHRatio; ··· 3095 3086 long WritebackSourceHeight, 3096 3087 unsigned int HTotal) 3097 3088 { 3089 + (void)WritebackPixelFormat; 3090 + (void)WritebackHRatio; 3098 3091 double CalculateWriteBackDelay = 0; 3099 3092 double Line_length = 0; 3100 3093 double Output_lines_last_notclamped = 0; ··· 3210 3199 double *final_flip_bw, 3211 3200 bool *ImmediateFlipSupportedForPipe) 3212 3201 { 3202 + (void)mode_lib; 3203 + (void)HostVMMinPageSize; 3213 3204 double min_row_time = 0.0; 3214 3205 unsigned int HostVMDynamicLevelsTrips = 0; 3215 3206 double TimeForFetchingMetaPTEImmediateFlip = 0; ··· 4981 4968 double BytePerPixelDETC[], 4982 4969 enum clock_change_support *DRAMClockChangeSupport) 4983 4970 { 4971 + (void)DCFCLK; 4972 + (void)ReturnBW; 4973 + (void)DPPCLK; 4974 + (void)DETBufferSizeC; 4984 4975 struct vba_vars_st *v = &mode_lib->vba; 4985 4976 double EffectiveLBLatencyHidingY = 0; 4986 4977 double EffectiveLBLatencyHidingC = 0; ··· 5229 5212 double *UrgentBurstFactorChroma, 5230 5213 bool *NotEnoughUrgentLatencyHiding) 5231 5214 { 5215 + (void)DETBufferSizeInKByte; 5216 + (void)VRatioC; 5232 5217 double LinesInDETLuma = 0; 5233 5218 double LinesInDETChroma = 0; 5234 5219 unsigned int LinesInCursorBuffer = 0; ··· 5594 5575 double TimePerVMRequestVBlank[], 5595 5576 double TimePerVMRequestFlip[]) 5596 5577 { 5578 + (void)dpte_row_width_luma_ub; 5579 + (void)dpte_row_width_chroma_ub; 5597 5580 int num_group_per_lower_vm_stage = 0; 5598 5581 int num_req_per_lower_vm_stage = 0; 5599 5582 unsigned int k; ··· 5878 5857 bool ViewportSizeSupportPerPlane[], 5879 5858 bool *ViewportSizeSupport) 5880 5859 { 5860 + (void)HRatioChroma; 5881 5861 int MaximumSwathHeightY[DC__NUM_DPP__MAX] = { 0 }; 5882 5862 int MaximumSwathHeightC[DC__NUM_DPP__MAX] = { 0 }; 5883 5863 int MinimumSwathHeightY = 0; ··· 6061 6039 unsigned int swath_width_luma_ub[], 6062 6040 unsigned int swath_width_chroma_ub[]) 6063 6041 { 6042 + (void)BytePerPixY; 6064 6043 unsigned int k, j; 6065 6044 long surface_width_ub_l; 6066 6045 long surface_height_ub_l;
+5
drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
··· 50 50 unsigned int delivery_width, 51 51 unsigned int req_per_swath_ub) 52 52 { 53 + (void)mode_lib; 53 54 double refcyc_per_delivery = 0.0; 54 55 55 56 if (vratio <= 1.0) { ··· 805 804 unsigned int cur_width, 806 805 enum cursor_bpp cur_bpp) 807 806 { 807 + (void)mode_lib; 808 808 unsigned int cur_src_width = cur_width; 809 809 unsigned int cur_req_size = 0; 810 810 unsigned int cur_req_width = 0; ··· 898 896 const bool ignore_viewport_pos, 899 897 const bool immediate_flip_support) 900 898 { 899 + (void)vm_en; 900 + (void)ignore_viewport_pos; 901 + (void)immediate_flip_support; 901 902 const display_pipe_source_params_st *src = &e2e_pipe_param[pipe_idx].pipe.src; 902 903 const display_pipe_dest_params_st *dst = &e2e_pipe_param[pipe_idx].pipe.dest; 903 904 const display_output_params_st *dout = &e2e_pipe_param[pipe_idx].dout;
+24
drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
··· 873 873 double *VUpdateWidthPix, 874 874 double *VReadyOffsetPix) 875 875 { 876 + (void)mode_lib; 877 + (void)HostVMMinPageSize; 878 + (void)SwathWidthY; 879 + (void)SwathWidthC; 880 + 876 881 bool MyError = false; 877 882 unsigned int DPPCycles, DISPCLKCycles; 878 883 double DSTTotalPixelsAfterScaler; ··· 1496 1491 unsigned int *IndependentBlockLuma, 1497 1492 unsigned int *IndependentBlockChroma) 1498 1493 { 1494 + (void)SurfaceWidthChroma; 1495 + (void)SurfaceHeightChroma; 1496 + (void)BytePerPixelDETY; 1497 + (void)BytePerPixelDETC; 1499 1498 int yuv420; 1500 1499 int horz_div_l; 1501 1500 int horz_div_c; ··· 1832 1823 int *DPDE0BytesFrame, 1833 1824 int *MetaPTEBytesFrame) 1834 1825 { 1826 + (void)SourcePixelFormat; 1835 1827 struct vba_vars_st *v = &mode_lib->vba; 1836 1828 unsigned int MPDEBytesFrame; 1837 1829 unsigned int DCCMetaSurfaceBytes; ··· 3375 3365 unsigned int HTotal, 3376 3366 unsigned int WritebackLineBufferSize) 3377 3367 { 3368 + (void)WritebackPixelFormat; 3369 + (void)WritebackVRatio; 3378 3370 double DISPCLK_H, DISPCLK_V, DISPCLK_HB; 3379 3371 3380 3372 DISPCLK_H = PixelClock * dml_ceil(WritebackHTaps / 8.0, 1) / WritebackHRatio; ··· 3395 3383 int WritebackSourceHeight, 3396 3384 unsigned int HTotal) 3397 3385 { 3386 + (void)WritebackPixelFormat; 3387 + (void)WritebackHRatio; 3398 3388 double CalculateWriteBackDelay; 3399 3389 double Line_length; 3400 3390 double Output_lines_last_notclamped; ··· 5580 5566 double *Z8StutterExitWatermark, 5581 5567 double *Z8StutterEnterPlusExitWatermark) 5582 5568 { 5569 + (void)DCFCLK; 5570 + (void)ReturnBW; 5571 + (void)DETBufferSizeC; 5583 5572 struct vba_vars_st *v = &mode_lib->vba; 5584 5573 double EffectiveLBLatencyHidingY; 5585 5574 double EffectiveLBLatencyHidingC; ··· 5848 5831 double *UrgentBurstFactorChroma, 5849 5832 bool *NotEnoughUrgentLatencyHiding) 5850 5833 { 5834 + (void)VRatioC; 5851 5835 double LinesInDETLuma; 5852 5836 double LinesInDETChroma; 5853 5837 unsigned int LinesInCursorBuffer; ··· 6231 6213 double TimePerVMRequestVBlank[], 6232 6214 double TimePerVMRequestFlip[]) 6233 6215 { 6216 + (void)dpte_row_width_luma_ub; 6217 + (void)dpte_row_width_chroma_ub; 6234 6218 int num_group_per_lower_vm_stage; 6235 6219 int num_req_per_lower_vm_stage; 6236 6220 int k; ··· 6370 6350 int *Z8NumberOfStutterBurstsPerFrame, 6371 6351 double *StutterPeriod) 6372 6352 { 6353 + (void)ConfigReturnBufferSizeInKByte; 6354 + 6373 6355 struct vba_vars_st *v = &mode_lib->vba; 6374 6356 6375 6357 double DETBufferingTimeY; ··· 6671 6649 bool ViewportSizeSupportPerPlane[], 6672 6650 bool *ViewportSizeSupport) 6673 6651 { 6652 + (void)HRatioChroma; 6674 6653 int MaximumSwathHeightY[DC__NUM_DPP__MAX]; 6675 6654 int MaximumSwathHeightC[DC__NUM_DPP__MAX]; 6676 6655 int MinimumSwathHeightY; ··· 6846 6823 int swath_width_luma_ub[], 6847 6824 int swath_width_chroma_ub[]) 6848 6825 { 6826 + (void)BytePerPixY; 6849 6827 enum odm_combine_mode MainPlaneODMCombine; 6850 6828 int j, k; 6851 6829
+8
drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
··· 51 51 unsigned int delivery_width, 52 52 unsigned int req_per_swath_ub) 53 53 { 54 + (void)mode_lib; 54 55 double refcyc_per_delivery = 0.0; 55 56 56 57 if (vratio <= 1.0) { ··· 786 785 unsigned int cur_width, 787 786 enum cursor_bpp cur_bpp) 788 787 { 788 + (void)mode_lib; 789 789 unsigned int cur_src_width = cur_width; 790 790 unsigned int cur_req_size = 0; 791 791 unsigned int cur_req_width = 0; ··· 861 859 const bool ignore_viewport_pos, 862 860 const bool immediate_flip_support) 863 861 { 862 + (void)cstate_en; 863 + (void)pstate_en; 864 + (void)vm_en; 865 + (void)ignore_viewport_pos; 866 + (void)immediate_flip_support; 867 + (void)dlg_sys_param; 864 868 const display_pipe_source_params_st *src = &e2e_pipe_param[pipe_idx].pipe.src; 865 869 const display_pipe_dest_params_st *dst = &e2e_pipe_param[pipe_idx].pipe.dest; 866 870 const display_clocks_and_cfg_st *clks = &e2e_pipe_param[pipe_idx].clks_cfg;
+1 -5
drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
··· 391 391 } 392 392 context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE; 393 393 394 - dc->config.enable_4to1MPC = false; 395 394 if (pipe_cnt == 1 && pipe->plane_state 396 395 && pipe->plane_state->rotation == ROTATION_ANGLE_0 && !dc->debug.disable_z9_mpc) { 397 - if (is_dual_plane(pipe->plane_state->format) 398 - && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) { 399 - dc->config.enable_4to1MPC = true; 400 - } else if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) { 396 + if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) { 401 397 /* Limit to 5k max to avoid forced pipe split when there is not enough detile for swath */ 402 398 context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192; 403 399 pipes[0].pipe.src.unbounded_req_mode = true;
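Here, and in the dcn35/dcn351 hunks below, the dual-plane <=1920x1080 branch that set dc->config.enable_4to1MPC is dropped, matching the dc.h rename of the field to allow_4to1MPC. Read together, the flag appears to become a statically configured permission rather than a value recomputed on every validation; that reading, and the consumer sketched here, are assumptions rather than anything shown in this patch:

        /* Hypothetical consumer under the renamed field: 4:1 MPC combine is
         * considered only when config permits it; want_4to1_combine() and
         * apply_4to1_mpc() are illustrative names, not patch code. */
        if (dc->config.allow_4to1MPC && want_4to1_combine(pipe))
                apply_4to1_mpc(pipe);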
+24
drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
··· 891 891 double *VUpdateWidthPix, 892 892 double *VReadyOffsetPix) 893 893 { 894 + (void)mode_lib; 895 + (void)HostVMMinPageSize; 896 + (void)SwathWidthY; 897 + (void)SwathWidthC; 898 + 894 899 bool MyError = false; 895 900 unsigned int DPPCycles, DISPCLKCycles; 896 901 double DSTTotalPixelsAfterScaler; ··· 1513 1508 unsigned int *IndependentBlockLuma, 1514 1509 unsigned int *IndependentBlockChroma) 1515 1510 { 1511 + (void)SurfaceWidthChroma; 1512 + (void)SurfaceHeightChroma; 1513 + (void)BytePerPixelDETY; 1514 + (void)BytePerPixelDETC; 1516 1515 int yuv420; 1517 1516 int horz_div_l; 1518 1517 int horz_div_c; ··· 1849 1840 int *DPDE0BytesFrame, 1850 1841 int *MetaPTEBytesFrame) 1851 1842 { 1843 + (void)SourcePixelFormat; 1852 1844 struct vba_vars_st *v = &mode_lib->vba; 1853 1845 unsigned int MPDEBytesFrame; 1854 1846 unsigned int DCCMetaSurfaceBytes; ··· 3481 3471 unsigned int HTotal, 3482 3472 unsigned int WritebackLineBufferSize) 3483 3473 { 3474 + (void)WritebackPixelFormat; 3475 + (void)WritebackVRatio; 3484 3476 double DISPCLK_H, DISPCLK_V, DISPCLK_HB; 3485 3477 3486 3478 DISPCLK_H = PixelClock * dml_ceil(WritebackHTaps / 8.0, 1) / WritebackHRatio; ··· 3501 3489 int WritebackSourceHeight, 3502 3490 unsigned int HTotal) 3503 3491 { 3492 + (void)WritebackPixelFormat; 3493 + (void)WritebackHRatio; 3504 3494 double CalculateWriteBackDelay; 3505 3495 double Line_length; 3506 3496 double Output_lines_last_notclamped; ··· 5674 5660 double *Z8StutterExitWatermark, 5675 5661 double *Z8StutterEnterPlusExitWatermark) 5676 5662 { 5663 + (void)DCFCLK; 5664 + (void)ReturnBW; 5665 + (void)DETBufferSizeC; 5677 5666 struct vba_vars_st *v = &mode_lib->vba; 5678 5667 double EffectiveLBLatencyHidingY; 5679 5668 double EffectiveLBLatencyHidingC; ··· 5942 5925 double *UrgentBurstFactorChroma, 5943 5926 bool *NotEnoughUrgentLatencyHiding) 5944 5927 { 5928 + (void)VRatioC; 5945 5929 double LinesInDETLuma; 5946 5930 double LinesInDETChroma; 5947 5931 unsigned int LinesInCursorBuffer; ··· 6326 6308 double TimePerVMRequestVBlank[], 6327 6309 double TimePerVMRequestFlip[]) 6328 6310 { 6311 + (void)dpte_row_width_luma_ub; 6312 + (void)dpte_row_width_chroma_ub; 6329 6313 int num_group_per_lower_vm_stage; 6330 6314 int num_req_per_lower_vm_stage; 6331 6315 int k; ··· 6465 6445 int *Z8NumberOfStutterBurstsPerFrame, 6466 6446 double *StutterPeriod) 6467 6447 { 6448 + (void)ConfigReturnBufferSizeInKByte; 6449 + 6468 6450 struct vba_vars_st *v = &mode_lib->vba; 6469 6451 6470 6452 double DETBufferingTimeY; ··· 6765 6743 bool ViewportSizeSupportPerPlane[], 6766 6744 bool *ViewportSizeSupport) 6767 6745 { 6746 + (void)HRatioChroma; 6768 6747 int MaximumSwathHeightY[DC__NUM_DPP__MAX]; 6769 6748 int MaximumSwathHeightC[DC__NUM_DPP__MAX]; 6770 6749 int MinimumSwathHeightY; ··· 6937 6914 int swath_width_luma_ub[], 6938 6915 int swath_width_chroma_ub[]) 6939 6916 { 6917 + (void)BytePerPixY; 6940 6918 enum odm_combine_mode MainPlaneODMCombine; 6941 6919 int j, k; 6942 6920
+8
drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
··· 139 139 unsigned int delivery_width, 140 140 unsigned int req_per_swath_ub) 141 141 { 142 + (void)mode_lib; 142 143 double refcyc_per_delivery = 0.0; 143 144 144 145 if (vratio <= 1.0) { ··· 873 872 unsigned int cur_width, 874 873 enum cursor_bpp cur_bpp) 875 874 { 875 + (void)mode_lib; 876 876 unsigned int cur_src_width = cur_width; 877 877 unsigned int cur_req_size = 0; 878 878 unsigned int cur_req_width = 0; ··· 946 944 const bool ignore_viewport_pos, 947 945 const bool immediate_flip_support) 948 946 { 947 + (void)cstate_en; 948 + (void)pstate_en; 949 + (void)vm_en; 950 + (void)ignore_viewport_pos; 951 + (void)immediate_flip_support; 952 + (void)dlg_sys_param; 949 953 const display_pipe_source_params_st *src = &e2e_pipe_param[pipe_idx].pipe.src; 950 954 const display_pipe_dest_params_st *dst = &e2e_pipe_param[pipe_idx].pipe.dest; 951 955 const display_clocks_and_cfg_st *clks = &e2e_pipe_param[pipe_idx].clks_cfg;
+2
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
··· 3488 3488 */ 3489 3489 double dcn32_determine_max_vratio_prefetch(struct dc *dc, struct dc_state *context) 3490 3490 { 3491 + (void)dc; 3491 3492 double max_vratio_pre = __DML_MAX_BW_RATIO_PRE__; // Default value is 4 3492 3493 int i; 3493 3494 ··· 3594 3593 3595 3594 void dcn32_set_clock_limits(const struct _vcs_dpi_soc_bounding_box_st *soc_bb) 3596 3595 { 3596 + (void)soc_bb; 3597 3597 dc_assert_fp_enabled(); 3598 3598 dcn3_2_soc.clock_limits[0].dcfclk_mhz = 1200.0; 3599 3599 }
+23
drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
··· 457 457 bool ViewportSizeSupportPerSurface[], 458 458 bool *ViewportSizeSupport) 459 459 { 460 + (void)HRatioChroma; 460 461 unsigned int MaximumSwathHeightY[DC__NUM_DPP__MAX]; 461 462 unsigned int MaximumSwathHeightC[DC__NUM_DPP__MAX]; 462 463 unsigned int RoundedUpMaxSwathSizeBytesY[DC__NUM_DPP__MAX] = { 0 }; ··· 717 716 unsigned int swath_width_luma_ub[], // per-pipe 718 717 unsigned int swath_width_chroma_ub[]) // per-pipe 719 718 { 719 + (void)BytePerPixY; 720 720 unsigned int k, j; 721 721 enum odm_combine_mode MainSurfaceODMMode; 722 722 ··· 2306 2304 unsigned int *DPDE0BytesFrame, 2307 2305 unsigned int *MetaPTEBytesFrame) 2308 2306 { 2307 + (void)SourcePixelFormat; 2309 2308 unsigned int MPDEBytesFrame; 2310 2309 unsigned int DCCMetaSurfaceBytes; 2311 2310 unsigned int ExtraDPDEBytesFrame; ··· 2748 2745 double *UrgentBurstFactorChroma, 2749 2746 bool *NotEnoughUrgentLatencyHiding) 2750 2747 { 2748 + (void)VRatioC; 2751 2749 double LinesInDETLuma; 2752 2750 double LinesInDETChroma; 2753 2751 unsigned int LinesInCursorBuffer; ··· 2904 2900 unsigned int WritebackSourceHeight, 2905 2901 unsigned int HTotal) 2906 2902 { 2903 + (void)WritebackPixelFormat; 2904 + (void)WritebackHRatio; 2907 2905 double CalculateWriteBackDelay; 2908 2906 double Line_length; 2909 2907 double Output_lines_last_notclamped; ··· 2983 2977 /* Output */ 2984 2978 double DCFCLKState[][2]) 2985 2979 { 2980 + (void)MaxAveragePercentOfIdealSDPPortBWDisplayCanUseInNormalSystemOperation; 2981 + (void)ReadBandwidthLuma; 2982 + (void)ReadBandwidthChroma; 2986 2983 unsigned int i, j, k; 2987 2984 unsigned int dummy1; 2988 2985 double dummy2, dummy3; ··· 3456 3447 double *VUpdateWidthPix, 3457 3448 double *VReadyOffsetPix) 3458 3449 { 3450 + (void)SwathWidthY; 3451 + (void)SwathWidthC; 3459 3452 double DPPCLKDelaySubtotalPlusCNVCFormater = v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater; 3460 3453 bool MyError = false; 3461 3454 unsigned int DPPCycles, DISPCLKCycles; ··· 4156 4145 double *final_flip_bw, 4157 4146 bool *ImmediateFlipSupportedForPipe) 4158 4147 { 4148 + (void)HostVMMinPageSize; 4159 4149 double min_row_time = 0.0; 4160 4150 unsigned int HostVMDynamicLevelsTrips; 4161 4151 double TimeForFetchingMetaPTEImmediateFlip; ··· 4299 4287 bool *USRRetrainingSupport, 4300 4288 double ActiveDRAMClockChangeLatencyMargin[]) 4301 4289 { 4290 + (void)DCFCLK; 4291 + (void)ReturnBW; 4302 4292 unsigned int i, j, k; 4303 4293 unsigned int SurfaceWithMinActiveFCLKChangeMargin = 0; 4304 4294 unsigned int DRAMClockChangeSupportNumber = 0; ··· 4669 4655 unsigned int WritebackLineBufferSize, 4670 4656 double DISPCLKDPPCLKVCOSpeed) 4671 4657 { 4658 + (void)WritebackPixelFormat; 4659 + (void)WritebackVRatio; 4672 4660 double DISPCLK_H, DISPCLK_V, DISPCLK_HB; 4673 4661 4674 4662 DISPCLK_H = PixelClock * dml_ceil(WritebackHTaps / 8.0, 1) / WritebackHRatio; ··· 5182 5166 double TimePerVMRequestVBlank[], 5183 5167 double TimePerVMRequestFlip[]) 5184 5168 { 5169 + (void)dpte_row_width_luma_ub; 5170 + (void)dpte_row_width_chroma_ub; 5185 5171 unsigned int k; 5186 5172 unsigned int num_group_per_lower_vm_stage; 5187 5173 unsigned int num_req_per_lower_vm_stage; ··· 5339 5321 unsigned int *IndependentBlockLuma, 5340 5322 unsigned int *IndependentBlockChroma) 5341 5323 { 5324 + (void)SurfaceWidthChroma; 5325 + (void)SurfaceHeightChroma; 5326 + (void)TilingFormat; 5327 + (void)BytePerPixelDETY; 5328 + (void)BytePerPixelDETC; 5342 5329 typedef enum { 5343 5330 REQ_256Bytes, 5344 5331 REQ_128BytesNonContiguous,
+2 -6
drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
··· 202 202 203 203 void dcn35_build_wm_range_table_fpu(struct clk_mgr *clk_mgr) 204 204 { 205 + (void)clk_mgr; 205 206 //TODO 206 207 } 207 208 ··· 529 528 } 530 529 531 530 context->bw_ctx.dml.ip.det_buffer_size_kbytes = 384;/*per guide*/ 532 - dc->config.enable_4to1MPC = false; 533 531 534 532 if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) { 535 - if (is_dual_plane(pipe->plane_state->format) 536 - && pipe->plane_state->src_rect.width <= 1920 && 537 - pipe->plane_state->src_rect.height <= 1080) { 538 - dc->config.enable_4to1MPC = true; 539 - } else if (!is_dual_plane(pipe->plane_state->format) && 533 + if (!is_dual_plane(pipe->plane_state->format) && 540 534 pipe->plane_state->src_rect.width <= 5120) { 541 535 /* 542 536 * Limit to 5k max to avoid forced pipe split when there
+1 -6
drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
··· 561 561 } 562 562 563 563 context->bw_ctx.dml.ip.det_buffer_size_kbytes = 384;/*per guide*/ 564 - dc->config.enable_4to1MPC = false; 565 564 566 565 if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) { 567 - if (is_dual_plane(pipe->plane_state->format) 568 - && pipe->plane_state->src_rect.width <= 1920 && 569 - pipe->plane_state->src_rect.height <= 1080) { 570 - dc->config.enable_4to1MPC = true; 571 - } else if (!is_dual_plane(pipe->plane_state->format) && 566 + if (!is_dual_plane(pipe->plane_state->format) && 572 567 pipe->plane_state->src_rect.width <= 5120) { 573 568 /* 574 569 * Limit to 5k max to avoid forced pipe split when there
+1
drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
··· 162 162 display_e2e_pipe_params_st *pipes, 163 163 int pipe_cnt) 164 164 { 165 + (void)mode_lib; 165 166 display_pipe_source_params_st *pipe_src; 166 167 display_pipe_dest_params_st *pipe_dest; 167 168 scaler_ratio_depth_st *scale_ratio_depth;
+14
drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
··· 49 49 50 50 void print__data_rq_sizing_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_sizing_params_st *rq_sizing) 51 51 { 52 + (void)mode_lib; 53 + (void)rq_sizing; 52 54 dml_print("DML_RQ_DLG_CALC: =====================================\n"); 53 55 dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_SIZING_PARAM_ST\n"); 54 56 dml_print("DML_RQ_DLG_CALC: chunk_bytes = %0d\n", rq_sizing->chunk_bytes); ··· 66 64 67 65 void print__data_rq_dlg_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_dlg_params_st *rq_dlg_param) 68 66 { 67 + (void)mode_lib; 68 + (void)rq_dlg_param; 69 69 dml_print("DML_RQ_DLG_CALC: =====================================\n"); 70 70 dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_DLG_PARAM_ST\n"); 71 71 dml_print( ··· 111 107 112 108 void print__data_rq_misc_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_misc_params_st *rq_misc_param) 113 109 { 110 + (void)mode_lib; 111 + (void)rq_misc_param; 114 112 dml_print("DML_RQ_DLG_CALC: =====================================\n"); 115 113 dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_MISC_PARAM_ST\n"); 116 114 dml_print( ··· 130 124 131 125 void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param) 132 126 { 127 + (void)dlg_sys_param; 128 + (void)mode_lib; 133 129 dml_print("DML_RQ_DLG_CALC: =====================================\n"); 134 130 dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_DLG_PARAM_ST\n"); 135 131 dml_print("DML_RQ_DLG_CALC: t_mclk_wm_us = %3.2f\n", dlg_sys_param->t_mclk_wm_us); ··· 152 144 153 145 void print__data_rq_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_regs_st *rq_regs) 154 146 { 147 + (void)mode_lib; 148 + (void)rq_regs; 155 149 dml_print("DML_RQ_DLG_CALC: =====================================\n"); 156 150 dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_REGS_ST\n"); 157 151 dml_print("DML_RQ_DLG_CALC: chunk_size = 0x%0x\n", rq_regs->chunk_size); ··· 189 179 190 180 void print__dlg_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_dlg_regs_st *dlg_regs) 191 181 { 182 + (void)dlg_regs; 183 + (void)mode_lib; 192 184 dml_print("DML_RQ_DLG_CALC: =====================================\n"); 193 185 dml_print("DML_RQ_DLG_CALC: DISPLAY_DLG_REGS_ST\n"); 194 186 dml_print( ··· 328 316 329 317 void print__ttu_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_ttu_regs_st *ttu_regs) 330 318 { 319 + (void)mode_lib; 320 + (void)ttu_regs; 331 321 dml_print("DML_RQ_DLG_CALC: =====================================\n"); 332 322 dml_print("DML_RQ_DLG_CALC: DISPLAY_TTU_REGS_ST\n"); 333 323 dml_print(
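These helpers pass every argument to dml_print(), yet the hunk still voids mode_lib and the parameter structs. That only makes sense if dml_print can expand to nothing in some configurations, taking its arguments with it and leaving the parameters formally unused. A self-contained illustration of that situation; DBG_PRINT stands in for the real macro:

        #include <stdio.h>

        #ifdef DEBUG
        #define DBG_PRINT(fmt, ...) printf(fmt, ##__VA_ARGS__)
        #else
        #define DBG_PRINT(fmt, ...) do { } while (0)   /* args vanish with it */
        #endif

        static void dump_value(int v)
        {
                (void)v;        /* keeps the no-op build warning-free */
                DBG_PRINT("v = %d\n", v);
        }

        int main(void)
        {
                dump_value(42);
                return 0;
        }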
+3
drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
··· 104 104 unsigned int delivery_width, 105 105 unsigned int req_per_swath_ub) 106 106 { 107 + (void)mode_lib; 107 108 double refcyc_per_delivery = 0.0; 108 109 109 110 if (vratio <= 1.0) { ··· 134 133 double vinit, 135 134 double l_sw) 136 135 { 136 + (void)mode_lib; 137 137 double prefill = dml_floor(vinit, 1); 138 138 double vratio_pre = 1.0; 139 139 ··· 176 174 unsigned int swath_height, 177 175 double vinit) 178 176 { 177 + (void)mode_lib; 179 178 double prefill = dml_floor(vinit, 1); 180 179 unsigned int max_partial_sw_int; 181 180
+22 -52
drivers/gpu/drm/amd/display/dc/dml2_0/Makefile
··· 53 53 subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/inc
54 54 subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/
55 55
56 - CFLAGS_$(AMDDALPATH)/dc/dml2_0/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag)
57 - CFLAGS_$(AMDDALPATH)/dc/dml2_0/display_mode_util.o := $(dml2_ccflags)
58 - CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_wrapper_fpu.o := $(dml2_ccflags)
59 - CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_utils.o := $(dml2_ccflags)
60 - CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_policy.o := $(dml2_ccflags)
61 - CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_translation_helper.o := $(dml2_ccflags)
62 - CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_mall_phantom.o := $(dml2_ccflags)
63 - CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml_display_rq_dlg_calc.o := $(dml2_ccflags)
64 - CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_dc_resource_mgmt.o := $(dml2_ccflags)
56 + # Add FPU flags to all dml2 files by default, remove NO_FPU flags.
57 + # FPU flags step 1: Find all .c files in dal/dc/dml2_0 and its subfolders
58 + DML2_ABS_PATH := $(FULL_AMD_DISPLAY_PATH)/dc/dml2_0
59 + DML2_C_FILES := $(shell find $(DML2_ABS_PATH) -name '*.c' -type f)
65 60
61 + # FPU flags step 2: Convert to .o and make paths relative to $(AMDDALPATH)/dc/dml2_0/
62 + DML2_RELATIVE_O_FILES := $(patsubst $(DML2_ABS_PATH)/%,dc/dml2_0/%,$(patsubst %.c,%.o,$(DML2_C_FILES)))
63 +
64 + # FPU flags step 3: Apply FPU flags to all .o files from dal/dc/dml2_0 and its subfolders
65 + $(foreach obj,$(DML2_RELATIVE_O_FILES),$(eval CFLAGS_$(AMDDALPATH)/$(obj) := $(dml2_ccflags)))
66 + $(foreach obj,$(DML2_RELATIVE_O_FILES),$(eval CFLAGS_REMOVE_$(AMDDALPATH)/$(obj) := $(dml2_rcflags)))
67 +
68 + # FPU flags step 4: Replace CFLAGS per file for files with additional flags beyond dml2_ccflags and dml2_rcflags
69 + CFLAGS_$(AMDDALPATH)/dc/dml2_0/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag)
70 + CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag)
71 + CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_ccflags) $(frame_warn_flag)
72 + CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_wrapper.o := $(dml2_rcflags)
73 + CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_wrapper.o := $(dml2_rcflags)
66 74 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/display_mode_core.o := $(dml2_rcflags)
67 - CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/display_mode_util.o := $(dml2_rcflags)
68 - CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_wrapper_fpu.o := $(dml2_rcflags)
69 - CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_utils.o := $(dml2_rcflags)
70 - CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_policy.o := $(dml2_rcflags)
71 - CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_translation_helper.o := $(dml2_rcflags)
72 - CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_mall_phantom.o := $(dml2_rcflags)
73 - CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml_display_rq_dlg_calc.o := $(dml2_rcflags)
74 - CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_dc_resource_mgmt.o := $(dml2_rcflags)
75 + CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags)
76 + CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_rcflags)
77 + CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_wrapper.o := $(dml2_ccflags)
78 + CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_wrapper.o := $(dml2_ccflags)
75 79
76 80 DML2 = display_mode_core.o display_mode_util.o dml2_wrapper_fpu.o dml2_wrapper.o \
77 81 dml2_utils.o dml2_policy.o dml2_translation_helper.o dml2_dc_resource_mgmt.o dml2_mall_phantom.o \
··· 85 81
86 82 AMD_DISPLAY_FILES += $(AMD_DAL_DML2) 87 83 88 - CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags) 89 - CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag) 90 - CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_ccflags) $(frame_warn_flag) 91 - CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags) 92 - CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags) 93 - CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags) 94 - CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags) 95 - CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags) 96 - CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_ccflags) 97 - CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn42.o := $(dml2_ccflags) 98 - CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_ccflags) 99 - CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflags) 100 - CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags) 101 - CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_ccflags) 102 - CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_ccflags) 103 - CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_translation_helper.o := $(dml2_ccflags) 104 - CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_utils.o := $(dml2_ccflags) 105 - 106 - CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags) 107 - CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags) 108 - CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags) 109 - CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_rcflags) 110 - CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags) 111 - CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags) 112 - CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags) 113 - CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags) 114 - CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags) 115 - CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn42.o := $(dml2_rcflags) 116 - CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_rcflags) 117 - CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_rcflags) 118 - CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_rcflags) 119 - CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_rcflags) 120 - CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_rcflags) 121 - CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_translation_helper.o := $(dml2_rcflags) 122 - CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_utils.o := $(dml2_rcflags) 123 84 124 85 DML21 := src/dml2_top/dml2_top_interfaces.o 125 86 DML21 += src/dml2_top/dml2_top_soc15.o ··· 103 134 DML21 += 
src/dml2_standalone_libraries/lib_float_math.o 104 135 DML21 += dml21_translation_helper.o 105 136 DML21 += dml21_wrapper.o 137 + DML21 += dml21_wrapper_fpu.o 106 138 DML21 += dml21_utils.o 107 139 108 140 AMD_DAL_DML21 = $(addprefix $(AMDDALPATH)/dc/dml2_0/dml21/,$(DML21))
+25 -14
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c
··· 90 90 struct pipe_ctx *pipe_ctx, 91 91 struct dml2_context *dml_ctx) 92 92 { 93 - unsigned int hblank_start, vblank_start, min_hardware_refresh_in_uhz; 93 + unsigned int hblank_start, vblank_start; 94 + uint64_t min_hardware_refresh_in_uhz; 94 95 uint32_t pix_clk_100hz; 95 96 96 97 timing->h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right + pipe_ctx->dsc_padding_params.dsc_hactive_padding; ··· 106 105 timing->h_total = stream->timing.h_total + pipe_ctx->dsc_padding_params.dsc_htotal_padding; 107 106 timing->v_total = stream->timing.v_total; 108 107 timing->h_sync_width = stream->timing.h_sync_width; 109 - timing->interlaced = stream->timing.flags.INTERLACE; 108 + timing->interlaced = (stream->timing.flags.INTERLACE != 0); 110 109 111 110 hblank_start = stream->timing.h_total - stream->timing.h_front_porch; 112 111 ··· 138 137 (timing->h_total * (long long)calc_max_hardware_v_total(stream))); 139 138 } 140 139 141 - timing->drr_config.min_refresh_uhz = max(stream->timing.min_refresh_in_uhz, min_hardware_refresh_in_uhz); 140 + { 141 + uint64_t min_refresh = max((uint64_t)stream->timing.min_refresh_in_uhz, min_hardware_refresh_in_uhz); 142 + ASSERT(min_refresh <= ULONG_MAX); 143 + timing->drr_config.min_refresh_uhz = (unsigned long)min_refresh; 144 + } 142 145 143 146 if (dml_ctx->config.callbacks.get_max_flickerless_instant_vtotal_increase && 144 147 stream->ctx->dc->config.enable_fpo_flicker_detection == 1) ··· 606 601 607 602 plane->composition.viewport.stationary = false; 608 603 609 - if (plane_state->cm.flags.bits.lut3d_dma_enable) { 604 + if (plane_state->mcm_luts.lut3d_data.lut3d_src == DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) { 610 605 plane->tdlut.setup_for_tdlut = true; 611 606 612 - switch (plane_state->cm.lut3d_dma.swizzle) { 613 - case CM_LUT_3D_SWIZZLE_LINEAR_RGB: 614 - case CM_LUT_3D_SWIZZLE_LINEAR_BGR: 607 + switch (plane_state->mcm_luts.lut3d_data.gpu_mem_params.layout) { 608 + case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB: 609 + case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR: 615 610 plane->tdlut.tdlut_addressing_mode = dml2_tdlut_sw_linear; 616 611 break; 617 - case CM_LUT_1D_PACKED_LINEAR: 618 - default: 612 + case DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR: 619 613 plane->tdlut.tdlut_addressing_mode = dml2_tdlut_simple_linear; 620 614 break; 621 615 } 622 616 623 - switch (plane_state->cm.lut3d_dma.size) { 624 - case CM_LUT_SIZE_333333: 617 + switch (plane_state->mcm_luts.lut3d_data.gpu_mem_params.size) { 618 + case DC_CM2_GPU_MEM_SIZE_171717: 619 + plane->tdlut.tdlut_width_mode = dml2_tdlut_width_17_cube; 620 + break; 621 + case DC_CM2_GPU_MEM_SIZE_333333: 625 622 plane->tdlut.tdlut_width_mode = dml2_tdlut_width_33_cube; 626 623 break; 627 - case CM_LUT_SIZE_171717: 624 + // handling when use case and HW support available 625 + case DC_CM2_GPU_MEM_SIZE_454545: 626 + case DC_CM2_GPU_MEM_SIZE_656565: 627 + break; 628 + case DC_CM2_GPU_MEM_SIZE_TRANSFORMED: 628 629 default: 629 - plane->tdlut.tdlut_width_mode = dml2_tdlut_width_17_cube; 630 + //plane->tdlut.tdlut_width_mode = dml2_tdlut_width_flatten; // dml2_tdlut_width_flatten undefined 630 631 break; 631 632 } 632 633 } ··· 702 691 703 692 if (!dml21_wrapper_get_plane_id(context, stream_id, plane, &plane_id)) { 704 693 ASSERT(false); 705 - return -1; 694 + return UINT_MAX; 706 695 } 707 696 708 697 for (i = 0; i < __DML2_WRAPPER_MAX_STREAMS_PLANES__; i++) {
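The widening of min_hardware_refresh_in_uhz to uint64_t, and the ASSERT before narrowing back, guard micro-Hz arithmetic that outgrows 32 bits in its intermediate steps. A self-contained sketch with made-up raster numbers (not the driver's exact formula):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t pix_clk_100hz = 6000000;              /* 600 MHz in 100 Hz units */
        uint64_t pixels_per_frame = 4400ull * 2250ull; /* illustrative h_total * v_total */

        /* The pixel rate in uHz is 6e14 here, far beyond UINT32_MAX (~4.29e9),
         * so the product must be formed in 64 bits before the divide. */
        uint64_t refresh_uhz =
            (uint64_t)pix_clk_100hz * 100ull * 1000000ull / pixels_per_frame;

        printf("refresh = %" PRIu64 " uHz (~%" PRIu64 " Hz)\n",
               refresh_uhz, refresh_uhz / 1000000ull);
        return 0;
    }

Doing the same multiply in a 32-bit type would silently truncate long before the divide brings the value back into range.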
+11 -4
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c
··· 420 420 type = static_base_state->stream_v1.base.type; 421 421 422 422 /* get information from context */ 423 - static_base_state->stream_v1.base.num_planes = context->stream_status[dc_stream_idx].plane_count; 424 - static_base_state->stream_v1.base.otg_inst = context->stream_status[dc_stream_idx].primary_otg_inst; 423 + ASSERT(context->stream_status[dc_stream_idx].plane_count >= 0 && 424 + context->stream_status[dc_stream_idx].plane_count <= 0xFF); 425 + ASSERT(context->stream_status[dc_stream_idx].primary_otg_inst >= 0 && 426 + context->stream_status[dc_stream_idx].primary_otg_inst <= 0xFF); 427 + static_base_state->stream_v1.base.num_planes = (uint8_t)context->stream_status[dc_stream_idx].plane_count; 428 + static_base_state->stream_v1.base.otg_inst = (uint8_t)context->stream_status[dc_stream_idx].primary_otg_inst; 425 429 426 430 /* populate pipe masks for planes */ 427 431 for (dc_plane_idx = 0; dc_plane_idx < context->stream_status[dc_stream_idx].plane_count; dc_plane_idx++) { ··· 462 458 switch (dc->debug.fams_version.minor) { 463 459 case 1: 464 460 default: 465 - static_sub_state->stream_v1.sub_state.subvp.phantom_otg_inst = phantom_status->primary_otg_inst; 461 + ASSERT(phantom_status->primary_otg_inst >= 0 && 462 + phantom_status->primary_otg_inst <= 0xFF); 463 + static_sub_state->stream_v1.sub_state.subvp.phantom_otg_inst = (uint8_t)phantom_status->primary_otg_inst; 466 464 467 465 /* populate pipe masks for phantom planes */ 468 466 for (dc_plane_idx = 0; dc_plane_idx < phantom_status->plane_count; dc_plane_idx++) { ··· 522 516 context->bw_ctx.bw.dcn.fams2_global_config.num_streams = num_fams2_streams; 523 517 } 524 518 525 - context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable; 519 + context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = 520 + (context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable != 0); 526 521 } 527 522 528 523 bool dml21_is_plane1_enabled(enum dml2_source_format_class source_format)
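The ASSERT-then-cast pairs above share one shape: prove the wider value fits before narrowing it into a u8 field. The same pattern as a standalone helper; checked_u8() is illustrative, not a DC API:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Narrow an int to uint8_t, asserting in debug builds that nothing is lost. */
    static uint8_t checked_u8(int v)
    {
        assert(v >= 0 && v <= 0xFF);
        return (uint8_t)v;
    }

    int main(void)
    {
        int plane_count = 4;   /* stand-in for stream_status[i].plane_count */
        uint8_t num_planes = checked_u8(plane_count);
        printf("num_planes = %u\n", num_planes);
        return 0;
    }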
+6 -374
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c
··· 9 9 #include "dml21_utils.h" 10 10 #include "dml21_translation_helper.h" 11 11 #include "dml2_dc_resource_mgmt.h" 12 + #include "dml2_wrapper.h" 13 + #include "dml2_wrapper_fpu.h" 14 + #include "dml21_wrapper.h" 15 + #include "dml21_wrapper_fpu.h" 12 16 #include "dc_fpu.h" 13 17 14 18 #if !defined(DC_RUN_WITH_PREEMPTION_ENABLED) ··· 38 34 (*dml_ctx)->v21.mode_programming.display_config = (*dml_ctx)->v21.mode_support.display_config; 39 35 40 36 DC_RUN_WITH_PREEMPTION_ENABLED((*dml_ctx)->v21.mode_programming.programming = vzalloc(sizeof(struct dml2_display_cfg_programming))); 37 + 41 38 if (!((*dml_ctx)->v21.mode_programming.programming)) 42 39 return false; 43 40 44 41 return true; 45 42 } 46 43 47 - static void dml21_populate_configuration_options(const struct dc *in_dc, 48 - struct dml2_context *dml_ctx, 49 - const struct dml2_configuration_options *config) 50 - { 51 - dml_ctx->config = *config; 52 - 53 - /* UCLK P-State options */ 54 - if (in_dc->debug.dml21_force_pstate_method) { 55 - dml_ctx->config.pmo.force_pstate_method_enable = true; 56 - for (int i = 0; i < MAX_PIPES; i++) 57 - dml_ctx->config.pmo.force_pstate_method_values[i] = in_dc->debug.dml21_force_pstate_method_values[i]; 58 - } else { 59 - dml_ctx->config.pmo.force_pstate_method_enable = false; 60 - } 61 - } 62 - 63 - static void dml21_init(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config) 64 - { 65 - 66 - dml_ctx->architecture = dml2_architecture_21; 67 - 68 - dml21_populate_configuration_options(in_dc, dml_ctx, config); 69 - 70 - DC_FP_START(); 71 - 72 - dml21_populate_dml_init_params(&dml_ctx->v21.dml_init, &dml_ctx->config, in_dc); 73 - 74 - dml2_initialize_instance(&dml_ctx->v21.dml_init); 75 - 76 - DC_FP_END(); 77 - } 78 - 79 44 bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config) 80 45 { 81 46 /* Allocate memory for initializing DML21 instance */ 82 - if (!dml21_allocate_memory(dml_ctx)) { 47 + if (!dml21_allocate_memory(dml_ctx)) 83 48 return false; 84 - } 85 49 86 50 dml21_init(in_dc, *dml_ctx, config); 87 51 ··· 60 88 { 61 89 vfree(dml2->v21.dml_init.dml2_instance); 62 90 vfree(dml2->v21.mode_programming.programming); 63 - } 64 - 65 - static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *context, struct resource_context *out_new_hw_state, 66 - struct dml2_context *in_ctx, unsigned int pipe_cnt) 67 - { 68 - unsigned int dml_prog_idx = 0, dc_pipe_index = 0, num_dpps_required = 0; 69 - struct dml2_per_plane_programming *pln_prog = NULL; 70 - struct dml2_per_stream_programming *stream_prog = NULL; 71 - struct pipe_ctx *dc_main_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__]; 72 - struct pipe_ctx *dc_phantom_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__] = {0}; 73 - int num_pipes; 74 - unsigned int dml_phantom_prog_idx; 75 - 76 - context->bw_ctx.bw.dcn.clk.dppclk_khz = 0; 77 - 78 - /* copy global DCHUBBUB arbiter registers */ 79 - memcpy(&context->bw_ctx.bw.dcn.arb_regs, &in_ctx->v21.mode_programming.programming->global_regs.arb_regs, sizeof(struct dml2_display_arb_regs)); 80 - 81 - /* legacy only */ 82 - context->bw_ctx.bw.dcn.compbuf_size_kb = (int)in_ctx->v21.mode_programming.programming->global_regs.arb_regs.compbuf_size * 64; 83 - 84 - context->bw_ctx.bw.dcn.mall_ss_size_bytes = 0; 85 - context->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes = 0; 86 - context->bw_ctx.bw.dcn.mall_subvp_size_bytes = 0; 87 - 88 - /* phantom's start after main planes */ 89 - 
dml_phantom_prog_idx = in_ctx->v21.mode_programming.programming->display_config.num_planes; 90 - 91 - for (dml_prog_idx = 0; dml_prog_idx < DML2_MAX_PLANES; dml_prog_idx++) { 92 - pln_prog = &in_ctx->v21.mode_programming.programming->plane_programming[dml_prog_idx]; 93 - 94 - if (!pln_prog->plane_descriptor) 95 - continue; 96 - 97 - stream_prog = &in_ctx->v21.mode_programming.programming->stream_programming[pln_prog->plane_descriptor->stream_index]; 98 - num_dpps_required = pln_prog->num_dpps_required; 99 - 100 - if (num_dpps_required == 0) { 101 - continue; 102 - } 103 - num_pipes = dml21_find_dc_pipes_for_plane(dc, context, in_ctx, dc_main_pipes, dc_phantom_pipes, dml_prog_idx); 104 - 105 - if (num_pipes <= 0) 106 - continue; 107 - 108 - /* program each pipe */ 109 - for (dc_pipe_index = 0; dc_pipe_index < num_pipes; dc_pipe_index++) { 110 - dml21_program_dc_pipe(in_ctx, context, dc_main_pipes[dc_pipe_index], pln_prog, stream_prog); 111 - 112 - if (pln_prog->phantom_plane.valid && dc_phantom_pipes[dc_pipe_index]) { 113 - dml21_program_dc_pipe(in_ctx, context, dc_phantom_pipes[dc_pipe_index], pln_prog, stream_prog); 114 - } 115 - } 116 - 117 - /* copy per plane mcache allocation */ 118 - memcpy(&context->bw_ctx.bw.dcn.mcache_allocations[dml_prog_idx], &pln_prog->mcache_allocation, sizeof(struct dml2_mcache_surface_allocation)); 119 - if (pln_prog->phantom_plane.valid) { 120 - memcpy(&context->bw_ctx.bw.dcn.mcache_allocations[dml_phantom_prog_idx], 121 - &pln_prog->phantom_plane.mcache_allocation, 122 - sizeof(struct dml2_mcache_surface_allocation)); 123 - 124 - dml_phantom_prog_idx++; 125 - } 126 - } 127 - 128 - /* assign global clocks */ 129 - context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz; 130 - context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz; 131 - if (in_ctx->v21.dml_init.soc_bb.clk_table.dispclk.num_clk_values > 1) { 132 - context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = 133 - in_ctx->v21.dml_init.soc_bb.clk_table.dispclk.clk_values_khz[in_ctx->v21.dml_init.soc_bb.clk_table.dispclk.num_clk_values] * 1000; 134 - } else { 135 - context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = in_ctx->v21.dml_init.soc_bb.clk_table.dispclk.clk_values_khz[0] * 1000; 136 - } 137 - 138 - if (in_ctx->v21.dml_init.soc_bb.clk_table.dppclk.num_clk_values > 1) { 139 - context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = 140 - in_ctx->v21.dml_init.soc_bb.clk_table.dppclk.clk_values_khz[in_ctx->v21.dml_init.soc_bb.clk_table.dppclk.num_clk_values] * 1000; 141 - } else { 142 - context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = in_ctx->v21.dml_init.soc_bb.clk_table.dppclk.clk_values_khz[0] * 1000; 143 - } 144 - 145 - /* get global mall allocation */ 146 - if (dc->res_pool->funcs->calculate_mall_ways_from_bytes) { 147 - context->bw_ctx.bw.dcn.clk.num_ways = dc->res_pool->funcs->calculate_mall_ways_from_bytes(dc, context->bw_ctx.bw.dcn.mall_subvp_size_bytes); 148 - } else { 149 - context->bw_ctx.bw.dcn.clk.num_ways = 0; 150 - } 151 - } 152 - 153 - static void dml21_prepare_mcache_params(struct dml2_context *dml_ctx, struct dc_state *context, struct dc_mcache_params *mcache_params) 154 - { 155 - int dc_plane_idx = 0; 156 - int dml_prog_idx, stream_idx, plane_idx; 157 - struct dml2_per_plane_programming *pln_prog = NULL; 158 - 159 - for (stream_idx = 0; stream_idx < context->stream_count; stream_idx++) { 160 - for (plane_idx = 0; plane_idx < context->stream_status[stream_idx].plane_count; plane_idx++) { 161 - dml_prog_idx = 
map_plane_to_dml21_display_cfg(dml_ctx, context->streams[stream_idx]->stream_id, context->stream_status[stream_idx].plane_states[plane_idx], context); 162 - if (dml_prog_idx == INVALID) { 163 - continue; 164 - } 165 - pln_prog = &dml_ctx->v21.mode_programming.programming->plane_programming[dml_prog_idx]; 166 - mcache_params[dc_plane_idx].valid = pln_prog->mcache_allocation.valid; 167 - mcache_params[dc_plane_idx].num_mcaches_plane0 = pln_prog->mcache_allocation.num_mcaches_plane0; 168 - mcache_params[dc_plane_idx].num_mcaches_plane1 = pln_prog->mcache_allocation.num_mcaches_plane1; 169 - mcache_params[dc_plane_idx].requires_dedicated_mall_mcache = pln_prog->mcache_allocation.requires_dedicated_mall_mcache; 170 - mcache_params[dc_plane_idx].last_slice_sharing.plane0_plane1 = pln_prog->mcache_allocation.last_slice_sharing.plane0_plane1; 171 - memcpy(mcache_params[dc_plane_idx].mcache_x_offsets_plane0, 172 - pln_prog->mcache_allocation.mcache_x_offsets_plane0, 173 - sizeof(int) * (DML2_MAX_MCACHES + 1)); 174 - memcpy(mcache_params[dc_plane_idx].mcache_x_offsets_plane1, 175 - pln_prog->mcache_allocation.mcache_x_offsets_plane1, 176 - sizeof(int) * (DML2_MAX_MCACHES + 1)); 177 - dc_plane_idx++; 178 - } 179 - } 180 - } 181 - 182 - static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx) 183 - { 184 - bool result = false; 185 - struct dml2_build_mode_programming_in_out *mode_programming = &dml_ctx->v21.mode_programming; 186 - struct dc_mcache_params mcache_params[MAX_PLANES] = {0}; 187 - 188 - memset(&dml_ctx->v21.display_config, 0, sizeof(struct dml2_display_cfg)); 189 - memset(&dml_ctx->v21.dml_to_dc_pipe_mapping, 0, sizeof(struct dml2_dml_to_dc_pipe_mapping)); 190 - memset(&dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params, 0, sizeof(struct dml2_core_mode_programming_in_out)); 191 - 192 - if (!context) 193 - return true; 194 - 195 - if (context->stream_count == 0) { 196 - dml21_init_min_clocks_for_dc_state(dml_ctx, context); 197 - dml21_build_fams2_programming(in_dc, context, dml_ctx); 198 - return true; 199 - } 200 - 201 - /* scrub phantom's from current dc_state */ 202 - dml_ctx->config.svp_pstate.callbacks.remove_phantom_streams_and_planes(in_dc, context); 203 - dml_ctx->config.svp_pstate.callbacks.release_phantom_streams_and_planes(in_dc, context); 204 - 205 - /* Populate stream, plane mappings and other fields in display config. 
*/ 206 - result = dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx); 207 - if (!result) 208 - return false; 209 - 210 - DC_FP_START(); 211 - result = dml2_build_mode_programming(mode_programming); 212 - DC_FP_END(); 213 - if (!result) 214 - return false; 215 - 216 - /* Check and map HW resources */ 217 - if (result && !dml_ctx->config.skip_hw_state_mapping) { 218 - dml21_map_hw_resources(dml_ctx); 219 - dml2_map_dc_pipes(dml_ctx, context, NULL, &dml_ctx->v21.dml_to_dc_pipe_mapping, in_dc->current_state); 220 - /* if subvp phantoms are present, expand them into dc context */ 221 - dml21_handle_phantom_streams_planes(in_dc, context, dml_ctx); 222 - 223 - if (in_dc->res_pool->funcs->program_mcache_pipe_config) { 224 - //Prepare mcache params for each plane based on mcache output from DML 225 - dml21_prepare_mcache_params(dml_ctx, context, mcache_params); 226 - 227 - //populate mcache regs to each pipe 228 - dml_ctx->config.callbacks.allocate_mcache(context, mcache_params); 229 - } 230 - } 231 - 232 - /* Copy DML CLK, WM and REG outputs to bandwidth context */ 233 - if (result && !dml_ctx->config.skip_hw_state_mapping) { 234 - dml21_calculate_rq_and_dlg_params(in_dc, context, &context->res_ctx, dml_ctx, in_dc->res_pool->pipe_count); 235 - dml21_copy_clocks_to_dc_state(dml_ctx, context); 236 - dml21_extract_watermark_sets(in_dc, &context->bw_ctx.bw.dcn.watermarks, dml_ctx); 237 - dml21_build_fams2_programming(in_dc, context, dml_ctx); 238 - } 239 - 240 - return true; 241 - } 242 - 243 - static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx) 244 - { 245 - bool is_supported = false; 246 - struct dml2_initialize_instance_in_out *dml_init = &dml_ctx->v21.dml_init; 247 - struct dml2_check_mode_supported_in_out *mode_support = &dml_ctx->v21.mode_support; 248 - 249 - memset(&dml_ctx->v21.display_config, 0, sizeof(struct dml2_display_cfg)); 250 - memset(&dml_ctx->v21.dml_to_dc_pipe_mapping, 0, sizeof(struct dml2_dml_to_dc_pipe_mapping)); 251 - memset(&dml_ctx->v21.mode_programming.dml2_instance->scratch.check_mode_supported_locals.mode_support_params, 0, sizeof(struct dml2_core_mode_support_in_out)); 252 - 253 - if (!context || context->stream_count == 0) 254 - return true; 255 - 256 - /* Scrub phantom's from current dc_state */ 257 - dml_ctx->config.svp_pstate.callbacks.remove_phantom_streams_and_planes(in_dc, context); 258 - dml_ctx->config.svp_pstate.callbacks.release_phantom_streams_and_planes(in_dc, context); 259 - 260 - mode_support->dml2_instance = dml_init->dml2_instance; 261 - dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx); 262 - dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params.programming = dml_ctx->v21.mode_programming.programming; 263 - DC_FP_START(); 264 - is_supported = dml2_check_mode_supported(mode_support); 265 - DC_FP_END(); 266 - if (!is_supported) 267 - return false; 268 - 269 - return true; 270 - } 271 - 272 - bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx, 273 - enum dc_validate_mode validate_mode) 274 - { 275 - bool out = false; 276 - 277 - /* Use dml21_check_mode_support for DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX path */ 278 - if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) 279 - out = dml21_check_mode_support(in_dc, context, dml_ctx); 280 - else 281 - out = dml21_mode_check_and_programming(in_dc, context, dml_ctx); 282 - 283 - return out; 284 - } 285 
- 286 - void dml21_prepare_mcache_programming(struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx) 287 - { 288 - unsigned int dml_prog_idx, dml_phantom_prog_idx, dc_pipe_index; 289 - int num_pipes; 290 - struct pipe_ctx *dc_main_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__]; 291 - struct pipe_ctx *dc_phantom_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__] = {0}; 292 - 293 - struct dml2_per_plane_programming *pln_prog = NULL; 294 - struct dml2_plane_mcache_configuration_descriptor *mcache_config = NULL; 295 - struct prepare_mcache_programming_locals *l = &dml_ctx->v21.scratch.prepare_mcache_locals; 296 - 297 - if (context->stream_count == 0) { 298 - return; 299 - } 300 - 301 - memset(&l->build_mcache_programming_params, 0, sizeof(struct dml2_build_mcache_programming_in_out)); 302 - l->build_mcache_programming_params.dml2_instance = dml_ctx->v21.dml_init.dml2_instance; 303 - 304 - /* phantom's start after main planes */ 305 - dml_phantom_prog_idx = dml_ctx->v21.mode_programming.programming->display_config.num_planes; 306 - 307 - /* Build mcache programming parameters per plane per pipe */ 308 - for (dml_prog_idx = 0; dml_prog_idx < dml_ctx->v21.mode_programming.programming->display_config.num_planes; dml_prog_idx++) { 309 - pln_prog = &dml_ctx->v21.mode_programming.programming->plane_programming[dml_prog_idx]; 310 - 311 - mcache_config = &l->build_mcache_programming_params.mcache_configurations[dml_prog_idx]; 312 - memset(mcache_config, 0, sizeof(struct dml2_plane_mcache_configuration_descriptor)); 313 - mcache_config->plane_descriptor = pln_prog->plane_descriptor; 314 - mcache_config->mcache_allocation = &context->bw_ctx.bw.dcn.mcache_allocations[dml_prog_idx]; 315 - mcache_config->num_pipes = pln_prog->num_dpps_required; 316 - l->build_mcache_programming_params.num_configurations++; 317 - 318 - if (pln_prog->num_dpps_required == 0) { 319 - continue; 320 - } 321 - 322 - num_pipes = dml21_find_dc_pipes_for_plane(in_dc, context, dml_ctx, dc_main_pipes, dc_phantom_pipes, dml_prog_idx); 323 - if (num_pipes <= 0 || dc_main_pipes[0]->stream == NULL || 324 - dc_main_pipes[0]->plane_state == NULL) 325 - continue; 326 - 327 - /* get config for each pipe */ 328 - for (dc_pipe_index = 0; dc_pipe_index < num_pipes; dc_pipe_index++) { 329 - ASSERT(dc_main_pipes[dc_pipe_index]); 330 - dml21_get_pipe_mcache_config(context, dc_main_pipes[dc_pipe_index], pln_prog, &mcache_config->pipe_configurations[dc_pipe_index]); 331 - } 332 - 333 - /* get config for each phantom pipe */ 334 - if (pln_prog->phantom_plane.valid && 335 - dc_phantom_pipes[0] && 336 - dc_main_pipes[0]->stream && 337 - dc_phantom_pipes[0]->plane_state) { 338 - mcache_config = &l->build_mcache_programming_params.mcache_configurations[dml_phantom_prog_idx]; 339 - memset(mcache_config, 0, sizeof(struct dml2_plane_mcache_configuration_descriptor)); 340 - mcache_config->plane_descriptor = pln_prog->plane_descriptor; 341 - mcache_config->mcache_allocation = &context->bw_ctx.bw.dcn.mcache_allocations[dml_phantom_prog_idx]; 342 - mcache_config->num_pipes = pln_prog->num_dpps_required; 343 - l->build_mcache_programming_params.num_configurations++; 344 - 345 - for (dc_pipe_index = 0; dc_pipe_index < num_pipes; dc_pipe_index++) { 346 - ASSERT(dc_phantom_pipes[dc_pipe_index]); 347 - dml21_get_pipe_mcache_config(context, dc_phantom_pipes[dc_pipe_index], pln_prog, &mcache_config->pipe_configurations[dc_pipe_index]); 348 - } 349 - 350 - /* increment phantom index */ 351 - dml_phantom_prog_idx++; 352 - } 353 - } 354 - 355 - /* Call to generate 
mcache programming per plane per pipe for the given display configuration */ 356 - dml2_build_mcache_programming(&l->build_mcache_programming_params); 357 - 358 - /* get per plane per pipe mcache programming */ 359 - for (dml_prog_idx = 0; dml_prog_idx < dml_ctx->v21.mode_programming.programming->display_config.num_planes; dml_prog_idx++) { 360 - pln_prog = &dml_ctx->v21.mode_programming.programming->plane_programming[dml_prog_idx]; 361 - 362 - num_pipes = dml21_find_dc_pipes_for_plane(in_dc, context, dml_ctx, dc_main_pipes, dc_phantom_pipes, dml_prog_idx); 363 - if (num_pipes <= 0 || dc_main_pipes[0]->stream == NULL || 364 - dc_main_pipes[0]->plane_state == NULL) 365 - continue; 366 - 367 - /* get config for each pipe */ 368 - for (dc_pipe_index = 0; dc_pipe_index < num_pipes; dc_pipe_index++) { 369 - ASSERT(dc_main_pipes[dc_pipe_index]); 370 - if (l->build_mcache_programming_params.per_plane_pipe_mcache_regs[dml_prog_idx][dc_pipe_index]) { 371 - memcpy(&dc_main_pipes[dc_pipe_index]->mcache_regs, 372 - l->build_mcache_programming_params.per_plane_pipe_mcache_regs[dml_prog_idx][dc_pipe_index], 373 - sizeof(struct dml2_hubp_pipe_mcache_regs)); 374 - } 375 - } 376 - 377 - /* get config for each phantom pipe */ 378 - if (pln_prog->phantom_plane.valid && 379 - dc_phantom_pipes[0] && 380 - dc_main_pipes[0]->stream && 381 - dc_phantom_pipes[0]->plane_state) { 382 - for (dc_pipe_index = 0; dc_pipe_index < num_pipes; dc_pipe_index++) { 383 - ASSERT(dc_phantom_pipes[dc_pipe_index]); 384 - if (l->build_mcache_programming_params.per_plane_pipe_mcache_regs[dml_phantom_prog_idx][dc_pipe_index]) { 385 - memcpy(&dc_phantom_pipes[dc_pipe_index]->mcache_regs, 386 - l->build_mcache_programming_params.per_plane_pipe_mcache_regs[dml_phantom_prog_idx][dc_pipe_index], 387 - sizeof(struct dml2_hubp_pipe_mcache_regs)); 388 - } 389 - } 390 - /* increment phantom index */ 391 - dml_phantom_prog_idx++; 392 - } 393 - } 394 91 } 395 92 396 93 void dml21_copy(struct dml2_context *dst_dml_ctx, ··· 87 446 88 447 dst_dml_ctx->v21.mode_programming.programming = dst_dml2_programming; 89 448 90 - DC_FP_START(); 91 - 92 449 /* need to initialize copied instance for internal references to be correct */ 93 450 dml2_initialize_instance(&dst_dml_ctx->v21.dml_init); 94 - 95 - DC_FP_END(); 96 451 } 97 452 98 453 bool dml21_create_copy(struct dml2_context **dst_dml_ctx, ··· 101 464 dml21_copy(*dst_dml_ctx, src_dml_ctx); 102 465 103 466 return true; 104 - } 105 - 106 - void dml21_reinit(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config) 107 - { 108 - dml21_init(in_dc, dml_ctx, config); 109 467 } 110 468
-30
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.h
··· 34 34 struct dml2_context *src_dml_ctx); 35 35 bool dml21_create_copy(struct dml2_context **dst_dml_ctx, 36 36 struct dml2_context *src_dml_ctx); 37 - void dml21_reinit(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config); 38 - 39 - /** 40 - * dml21_validate - Determines if a display configuration is supported or not. 41 - * @in_dc: dc. 42 - * @context: dc_state to be validated. 43 - * @dml_ctx: dml21 context. 44 - * @validate_mode: DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX 45 - * will not populate context.res_ctx. 46 - * 47 - * Based on fast_validate option internally would call: 48 - * 49 - * -dml21_mode_check_and_programming - for DC_VALIDATE_MODE_AND_PROGRAMMING option 50 - * Calculates if dc_state can be supported on the input display 51 - * configuration. If supported, generates the necessary HW 52 - * programming for the new dc_state. 53 - * 54 - * -dml21_check_mode_support - for DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX option 55 - * Calculates if dc_state can be supported for the input display 56 - * config. 57 - * 58 - * Context: Two threads may not invoke this function concurrently unless they reference 59 - * separate dc_states for validation. 60 - * Return: True if mode is supported, false otherwise. 61 - */ 62 - bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx, 63 - enum dc_validate_mode validate_mode); 64 - 65 - /* Prepare hubp mcache_regs for hubp mcache ID and split coordinate programming */ 66 - void dml21_prepare_mcache_programming(struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx); 67 37 68 38 /* Structure for inputting external SOCBB and DCNIP values for tool based debugging. */ 69 39 struct socbb_ip_params_external {
+381
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper_fpu.c
··· 1 + // SPDX-License-Identifier: MIT 2 + // 3 + // Copyright 2026 Advanced Micro Devices, Inc. 4 + 5 + #include "dml2_internal_types.h" 6 + #include "dml_top.h" 7 + #include "dml2_core_dcn4_calcs.h" 8 + #include "dml2_internal_shared_types.h" 9 + #include "dml21_utils.h" 10 + #include "dml21_translation_helper.h" 11 + #include "dml2_dc_resource_mgmt.h" 12 + #include "dml2_wrapper.h" 13 + #include "dml2_wrapper_fpu.h" 14 + #include "dml21_wrapper.h" 15 + #include "dml21_wrapper_fpu.h" 16 + 17 + #define INVALID -1 18 + 19 + static void dml21_populate_configuration_options(const struct dc *in_dc, 20 + struct dml2_context *dml_ctx, 21 + const struct dml2_configuration_options *config) 22 + { 23 + dml_ctx->config = *config; 24 + 25 + /* UCLK P-State options */ 26 + if (in_dc->debug.dml21_force_pstate_method) { 27 + dml_ctx->config.pmo.force_pstate_method_enable = true; 28 + for (int i = 0; i < MAX_PIPES; i++) 29 + dml_ctx->config.pmo.force_pstate_method_values[i] = in_dc->debug.dml21_force_pstate_method_values[i]; 30 + } else { 31 + dml_ctx->config.pmo.force_pstate_method_enable = false; 32 + } 33 + } 34 + 35 + void dml21_init(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config) 36 + { 37 + dml_ctx->architecture = dml2_architecture_21; 38 + 39 + dml21_populate_configuration_options(in_dc, dml_ctx, config); 40 + 41 + dml21_populate_dml_init_params(&dml_ctx->v21.dml_init, &dml_ctx->config, in_dc); 42 + 43 + dml2_initialize_instance(&dml_ctx->v21.dml_init); 44 + } 45 + 46 + void dml21_reinit(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config) 47 + { 48 + dml21_init(in_dc, dml_ctx, config); 49 + } 50 + 51 + static void dml21_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *context, struct resource_context *out_new_hw_state, 52 + struct dml2_context *in_ctx, unsigned int pipe_cnt) 53 + { 54 + unsigned int dml_prog_idx = 0, dc_pipe_index = 0, num_dpps_required = 0; 55 + struct dml2_per_plane_programming *pln_prog = NULL; 56 + struct dml2_per_stream_programming *stream_prog = NULL; 57 + struct pipe_ctx *dc_main_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__]; 58 + struct pipe_ctx *dc_phantom_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__] = {0}; 59 + int num_pipes; 60 + unsigned int dml_phantom_prog_idx; 61 + 62 + context->bw_ctx.bw.dcn.clk.dppclk_khz = 0; 63 + 64 + /* copy global DCHUBBUB arbiter registers */ 65 + memcpy(&context->bw_ctx.bw.dcn.arb_regs, &in_ctx->v21.mode_programming.programming->global_regs.arb_regs, sizeof(struct dml2_display_arb_regs)); 66 + 67 + /* legacy only */ 68 + context->bw_ctx.bw.dcn.compbuf_size_kb = (int)in_ctx->v21.mode_programming.programming->global_regs.arb_regs.compbuf_size * 64; 69 + 70 + context->bw_ctx.bw.dcn.mall_ss_size_bytes = 0; 71 + context->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes = 0; 72 + context->bw_ctx.bw.dcn.mall_subvp_size_bytes = 0; 73 + 74 + /* phantom's start after main planes */ 75 + dml_phantom_prog_idx = in_ctx->v21.mode_programming.programming->display_config.num_planes; 76 + 77 + for (dml_prog_idx = 0; dml_prog_idx < DML2_MAX_PLANES; dml_prog_idx++) { 78 + pln_prog = &in_ctx->v21.mode_programming.programming->plane_programming[dml_prog_idx]; 79 + 80 + if (!pln_prog->plane_descriptor) 81 + continue; 82 + 83 + stream_prog = &in_ctx->v21.mode_programming.programming->stream_programming[pln_prog->plane_descriptor->stream_index]; 84 + num_dpps_required = pln_prog->num_dpps_required; 85 + 86 + if (num_dpps_required == 0) { 87 + continue; 
88 + } 89 + num_pipes = dml21_find_dc_pipes_for_plane(dc, context, in_ctx, dc_main_pipes, dc_phantom_pipes, dml_prog_idx); 90 + 91 + if (num_pipes <= 0) 92 + continue; 93 + 94 + /* program each pipe */ 95 + for (dc_pipe_index = 0; dc_pipe_index < num_pipes; dc_pipe_index++) { 96 + dml21_program_dc_pipe(in_ctx, context, dc_main_pipes[dc_pipe_index], pln_prog, stream_prog); 97 + 98 + if (pln_prog->phantom_plane.valid && dc_phantom_pipes[dc_pipe_index]) { 99 + dml21_program_dc_pipe(in_ctx, context, dc_phantom_pipes[dc_pipe_index], pln_prog, stream_prog); 100 + } 101 + } 102 + 103 + /* copy per plane mcache allocation */ 104 + memcpy(&context->bw_ctx.bw.dcn.mcache_allocations[dml_prog_idx], &pln_prog->mcache_allocation, sizeof(struct dml2_mcache_surface_allocation)); 105 + if (pln_prog->phantom_plane.valid) { 106 + memcpy(&context->bw_ctx.bw.dcn.mcache_allocations[dml_phantom_prog_idx], 107 + &pln_prog->phantom_plane.mcache_allocation, 108 + sizeof(struct dml2_mcache_surface_allocation)); 109 + 110 + dml_phantom_prog_idx++; 111 + } 112 + } 113 + 114 + /* assign global clocks */ 115 + context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz; 116 + context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz; 117 + if (in_ctx->v21.dml_init.soc_bb.clk_table.dispclk.num_clk_values > 1) { 118 + context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = 119 + in_ctx->v21.dml_init.soc_bb.clk_table.dispclk.clk_values_khz[in_ctx->v21.dml_init.soc_bb.clk_table.dispclk.num_clk_values] * 1000; 120 + } else { 121 + context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = in_ctx->v21.dml_init.soc_bb.clk_table.dispclk.clk_values_khz[0] * 1000; 122 + } 123 + 124 + if (in_ctx->v21.dml_init.soc_bb.clk_table.dppclk.num_clk_values > 1) { 125 + context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = 126 + in_ctx->v21.dml_init.soc_bb.clk_table.dppclk.clk_values_khz[in_ctx->v21.dml_init.soc_bb.clk_table.dppclk.num_clk_values] * 1000; 127 + } else { 128 + context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = in_ctx->v21.dml_init.soc_bb.clk_table.dppclk.clk_values_khz[0] * 1000; 129 + } 130 + 131 + /* get global mall allocation */ 132 + if (dc->res_pool->funcs->calculate_mall_ways_from_bytes) { 133 + context->bw_ctx.bw.dcn.clk.num_ways = dc->res_pool->funcs->calculate_mall_ways_from_bytes(dc, context->bw_ctx.bw.dcn.mall_subvp_size_bytes); 134 + } else { 135 + context->bw_ctx.bw.dcn.clk.num_ways = 0; 136 + } 137 + } 138 + 139 + static void dml21_prepare_mcache_params(struct dml2_context *dml_ctx, struct dc_state *context, struct dc_mcache_params *mcache_params) 140 + { 141 + int dc_plane_idx = 0; 142 + int dml_prog_idx, stream_idx, plane_idx; 143 + struct dml2_per_plane_programming *pln_prog = NULL; 144 + 145 + for (stream_idx = 0; stream_idx < context->stream_count; stream_idx++) { 146 + for (plane_idx = 0; plane_idx < context->stream_status[stream_idx].plane_count; plane_idx++) { 147 + dml_prog_idx = map_plane_to_dml21_display_cfg(dml_ctx, context->streams[stream_idx]->stream_id, context->stream_status[stream_idx].plane_states[plane_idx], context); 148 + if (dml_prog_idx == INVALID) { 149 + continue; 150 + } 151 + pln_prog = &dml_ctx->v21.mode_programming.programming->plane_programming[dml_prog_idx]; 152 + mcache_params[dc_plane_idx].valid = pln_prog->mcache_allocation.valid; 153 + mcache_params[dc_plane_idx].num_mcaches_plane0 = pln_prog->mcache_allocation.num_mcaches_plane0; 154 + mcache_params[dc_plane_idx].num_mcaches_plane1 = pln_prog->mcache_allocation.num_mcaches_plane1; 
155 + mcache_params[dc_plane_idx].requires_dedicated_mall_mcache = pln_prog->mcache_allocation.requires_dedicated_mall_mcache; 156 + mcache_params[dc_plane_idx].last_slice_sharing.plane0_plane1 = pln_prog->mcache_allocation.last_slice_sharing.plane0_plane1; 157 + memcpy(mcache_params[dc_plane_idx].mcache_x_offsets_plane0, 158 + pln_prog->mcache_allocation.mcache_x_offsets_plane0, 159 + sizeof(int) * (DML2_MAX_MCACHES + 1)); 160 + memcpy(mcache_params[dc_plane_idx].mcache_x_offsets_plane1, 161 + pln_prog->mcache_allocation.mcache_x_offsets_plane1, 162 + sizeof(int) * (DML2_MAX_MCACHES + 1)); 163 + dc_plane_idx++; 164 + } 165 + } 166 + } 167 + 168 + static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx) 169 + { 170 + bool is_supported = false; 171 + struct dml2_initialize_instance_in_out *dml_init = &dml_ctx->v21.dml_init; 172 + struct dml2_check_mode_supported_in_out *mode_support = &dml_ctx->v21.mode_support; 173 + 174 + memset(&dml_ctx->v21.display_config, 0, sizeof(struct dml2_display_cfg)); 175 + memset(&dml_ctx->v21.dml_to_dc_pipe_mapping, 0, sizeof(struct dml2_dml_to_dc_pipe_mapping)); 176 + memset(&dml_ctx->v21.mode_programming.dml2_instance->scratch.check_mode_supported_locals.mode_support_params, 0, sizeof(struct dml2_core_mode_support_in_out)); 177 + 178 + if (!context || context->stream_count == 0) 179 + return true; 180 + 181 + /* Scrub phantom's from current dc_state */ 182 + dml_ctx->config.svp_pstate.callbacks.remove_phantom_streams_and_planes(in_dc, context); 183 + dml_ctx->config.svp_pstate.callbacks.release_phantom_streams_and_planes(in_dc, context); 184 + 185 + mode_support->dml2_instance = dml_init->dml2_instance; 186 + dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx); 187 + dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params.programming = dml_ctx->v21.mode_programming.programming; 188 + 189 + is_supported = dml2_check_mode_supported(mode_support); 190 + 191 + if (!is_supported) 192 + return false; 193 + 194 + return true; 195 + } 196 + 197 + static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx) 198 + { 199 + bool result = false; 200 + struct dml2_build_mode_programming_in_out *mode_programming = &dml_ctx->v21.mode_programming; 201 + struct dc_mcache_params mcache_params[MAX_PLANES] = {0}; 202 + 203 + memset(&dml_ctx->v21.display_config, 0, sizeof(struct dml2_display_cfg)); 204 + memset(&dml_ctx->v21.dml_to_dc_pipe_mapping, 0, sizeof(struct dml2_dml_to_dc_pipe_mapping)); 205 + memset(&dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params, 0, sizeof(struct dml2_core_mode_programming_in_out)); 206 + 207 + if (!context) 208 + return true; 209 + 210 + if (context->stream_count == 0) { 211 + dml21_init_min_clocks_for_dc_state(dml_ctx, context); 212 + dml21_build_fams2_programming(in_dc, context, dml_ctx); 213 + return true; 214 + } 215 + 216 + /* scrub phantom's from current dc_state */ 217 + dml_ctx->config.svp_pstate.callbacks.remove_phantom_streams_and_planes(in_dc, context); 218 + dml_ctx->config.svp_pstate.callbacks.release_phantom_streams_and_planes(in_dc, context); 219 + 220 + /* Populate stream, plane mappings and other fields in display config. 
*/ 221 + result = dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx); 222 + if (!result) 223 + return false; 224 + 225 + result = dml2_build_mode_programming(mode_programming); 226 + 227 + if (!result) 228 + return false; 229 + 230 + /* Check and map HW resources */ 231 + if (result && !dml_ctx->config.skip_hw_state_mapping) { 232 + dml21_map_hw_resources(dml_ctx); 233 + dml2_map_dc_pipes(dml_ctx, context, NULL, &dml_ctx->v21.dml_to_dc_pipe_mapping, in_dc->current_state); 234 + /* if subvp phantoms are present, expand them into dc context */ 235 + dml21_handle_phantom_streams_planes(in_dc, context, dml_ctx); 236 + 237 + if (in_dc->res_pool->funcs->program_mcache_pipe_config) { 238 + //Prepare mcache params for each plane based on mcache output from DML 239 + dml21_prepare_mcache_params(dml_ctx, context, mcache_params); 240 + 241 + //populate mcache regs to each pipe 242 + dml_ctx->config.callbacks.allocate_mcache(context, mcache_params); 243 + } 244 + } 245 + 246 + /* Copy DML CLK, WM and REG outputs to bandwidth context */ 247 + if (result && !dml_ctx->config.skip_hw_state_mapping) { 248 + dml21_calculate_rq_and_dlg_params(in_dc, context, &context->res_ctx, dml_ctx, in_dc->res_pool->pipe_count); 249 + dml21_copy_clocks_to_dc_state(dml_ctx, context); 250 + dml21_extract_watermark_sets(in_dc, &context->bw_ctx.bw.dcn.watermarks, dml_ctx); 251 + dml21_build_fams2_programming(in_dc, context, dml_ctx); 252 + } 253 + 254 + return true; 255 + } 256 + 257 + bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx, 258 + enum dc_validate_mode validate_mode) 259 + { 260 + bool out = false; 261 + 262 + /* Use dml21_check_mode_support for DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX path */ 263 + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) 264 + out = dml21_check_mode_support(in_dc, context, dml_ctx); 265 + else 266 + out = dml21_mode_check_and_programming(in_dc, context, dml_ctx); 267 + 268 + return out; 269 + } 270 + 271 + void dml21_prepare_mcache_programming(struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx) 272 + { 273 + unsigned int dml_prog_idx, dml_phantom_prog_idx, dc_pipe_index; 274 + int num_pipes; 275 + struct pipe_ctx *dc_main_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__]; 276 + struct pipe_ctx *dc_phantom_pipes[__DML2_WRAPPER_MAX_STREAMS_PLANES__] = {0}; 277 + 278 + struct dml2_per_plane_programming *pln_prog = NULL; 279 + struct dml2_plane_mcache_configuration_descriptor *mcache_config = NULL; 280 + struct prepare_mcache_programming_locals *l = &dml_ctx->v21.scratch.prepare_mcache_locals; 281 + 282 + if (context->stream_count == 0) { 283 + return; 284 + } 285 + 286 + memset(&l->build_mcache_programming_params, 0, sizeof(struct dml2_build_mcache_programming_in_out)); 287 + l->build_mcache_programming_params.dml2_instance = dml_ctx->v21.dml_init.dml2_instance; 288 + 289 + /* phantom's start after main planes */ 290 + dml_phantom_prog_idx = dml_ctx->v21.mode_programming.programming->display_config.num_planes; 291 + 292 + /* Build mcache programming parameters per plane per pipe */ 293 + for (dml_prog_idx = 0; dml_prog_idx < dml_ctx->v21.mode_programming.programming->display_config.num_planes; dml_prog_idx++) { 294 + pln_prog = &dml_ctx->v21.mode_programming.programming->plane_programming[dml_prog_idx]; 295 + 296 + mcache_config = &l->build_mcache_programming_params.mcache_configurations[dml_prog_idx]; 297 + memset(mcache_config, 0, sizeof(struct dml2_plane_mcache_configuration_descriptor)); 
298 + mcache_config->plane_descriptor = pln_prog->plane_descriptor; 299 + mcache_config->mcache_allocation = &context->bw_ctx.bw.dcn.mcache_allocations[dml_prog_idx]; 300 + ASSERT(pln_prog->num_dpps_required <= 0x7F); 301 + mcache_config->num_pipes = (char)pln_prog->num_dpps_required; 302 + l->build_mcache_programming_params.num_configurations++; 303 + 304 + if (pln_prog->num_dpps_required == 0) { 305 + continue; 306 + } 307 + 308 + num_pipes = dml21_find_dc_pipes_for_plane(in_dc, context, dml_ctx, dc_main_pipes, dc_phantom_pipes, dml_prog_idx); 309 + if (num_pipes <= 0 || dc_main_pipes[0]->stream == NULL || 310 + dc_main_pipes[0]->plane_state == NULL) 311 + continue; 312 + 313 + /* get config for each pipe */ 314 + for (dc_pipe_index = 0; dc_pipe_index < num_pipes; dc_pipe_index++) { 315 + ASSERT(dc_main_pipes[dc_pipe_index]); 316 + dml21_get_pipe_mcache_config(context, dc_main_pipes[dc_pipe_index], pln_prog, &mcache_config->pipe_configurations[dc_pipe_index]); 317 + } 318 + 319 + /* get config for each phantom pipe */ 320 + if (pln_prog->phantom_plane.valid && 321 + dc_phantom_pipes[0] && 322 + dc_main_pipes[0]->stream && 323 + dc_phantom_pipes[0]->plane_state) { 324 + mcache_config = &l->build_mcache_programming_params.mcache_configurations[dml_phantom_prog_idx]; 325 + memset(mcache_config, 0, sizeof(struct dml2_plane_mcache_configuration_descriptor)); 326 + mcache_config->plane_descriptor = pln_prog->plane_descriptor; 327 + mcache_config->mcache_allocation = &context->bw_ctx.bw.dcn.mcache_allocations[dml_phantom_prog_idx]; 328 + ASSERT(pln_prog->num_dpps_required <= 0x7F); 329 + mcache_config->num_pipes = (char)pln_prog->num_dpps_required; 330 + l->build_mcache_programming_params.num_configurations++; 331 + 332 + for (dc_pipe_index = 0; dc_pipe_index < num_pipes; dc_pipe_index++) { 333 + ASSERT(dc_phantom_pipes[dc_pipe_index]); 334 + dml21_get_pipe_mcache_config(context, dc_phantom_pipes[dc_pipe_index], pln_prog, &mcache_config->pipe_configurations[dc_pipe_index]); 335 + } 336 + 337 + /* increment phantom index */ 338 + dml_phantom_prog_idx++; 339 + } 340 + } 341 + 342 + /* Call to generate mcache programming per plane per pipe for the given display configuration */ 343 + dml2_build_mcache_programming(&l->build_mcache_programming_params); 344 + 345 + /* get per plane per pipe mcache programming */ 346 + for (dml_prog_idx = 0; dml_prog_idx < dml_ctx->v21.mode_programming.programming->display_config.num_planes; dml_prog_idx++) { 347 + pln_prog = &dml_ctx->v21.mode_programming.programming->plane_programming[dml_prog_idx]; 348 + 349 + num_pipes = dml21_find_dc_pipes_for_plane(in_dc, context, dml_ctx, dc_main_pipes, dc_phantom_pipes, dml_prog_idx); 350 + if (num_pipes <= 0 || dc_main_pipes[0]->stream == NULL || 351 + dc_main_pipes[0]->plane_state == NULL) 352 + continue; 353 + 354 + /* get config for each pipe */ 355 + for (dc_pipe_index = 0; dc_pipe_index < num_pipes; dc_pipe_index++) { 356 + ASSERT(dc_main_pipes[dc_pipe_index]); 357 + if (l->build_mcache_programming_params.per_plane_pipe_mcache_regs[dml_prog_idx][dc_pipe_index]) { 358 + memcpy(&dc_main_pipes[dc_pipe_index]->mcache_regs, 359 + l->build_mcache_programming_params.per_plane_pipe_mcache_regs[dml_prog_idx][dc_pipe_index], 360 + sizeof(struct dml2_hubp_pipe_mcache_regs)); 361 + } 362 + } 363 + 364 + /* get config for each phantom pipe */ 365 + if (pln_prog->phantom_plane.valid && 366 + dc_phantom_pipes[0] && 367 + dc_main_pipes[0]->stream && 368 + dc_phantom_pipes[0]->plane_state) { 369 + for (dc_pipe_index = 0; dc_pipe_index < 
num_pipes; dc_pipe_index++) { 370 + ASSERT(dc_phantom_pipes[dc_pipe_index]); 371 + if (l->build_mcache_programming_params.per_plane_pipe_mcache_regs[dml_phantom_prog_idx][dc_pipe_index]) { 372 + memcpy(&dc_phantom_pipes[dc_pipe_index]->mcache_regs, 373 + l->build_mcache_programming_params.per_plane_pipe_mcache_regs[dml_phantom_prog_idx][dc_pipe_index], 374 + sizeof(struct dml2_hubp_pipe_mcache_regs)); 375 + } 376 + } 377 + /* increment phantom index */ 378 + dml_phantom_prog_idx++; 379 + } 380 + } 381 + }
+60
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper_fpu.h
··· 1 + // SPDX-License-Identifier: MIT
2 + //
3 + // Copyright 2026 Advanced Micro Devices, Inc.
4 +
5 + #ifndef _DML21_WRAPPER_FPU_H_
6 + #define _DML21_WRAPPER_FPU_H_
7 +
8 + #include "os_types.h"
9 + #include "dml_top_soc_parameter_types.h"
10 + #include "dml_top_display_cfg_types.h"
11 +
12 + struct dc;
13 + struct dc_state;
14 + struct dml2_configuration_options;
15 + struct dml2_context;
16 + enum dc_validate_mode;
17 +
18 + /**
19 + * dml21_init - Initialize DML21 context
20 + * @in_dc: dc.
21 + * @dml_ctx: DML21 context to initialize.
22 + * @config: dml21 configuration options.
23 + *
24 + * Performs FPU-requiring initialization. Must be called with FPU protection.
25 + */
26 + void dml21_init(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config);
27 +
28 + /**
29 + * dml21_validate - Determines if a display configuration is supported or not.
30 + * @in_dc: dc.
31 + * @context: dc_state to be validated.
32 + * @dml_ctx: dml21 context.
33 + * @validate_mode: DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX
34 + * will not populate context.res_ctx.
35 + *
36 + * Based on the validate_mode argument, internally calls:
37 + *
38 + * -dml21_mode_check_and_programming - for DC_VALIDATE_MODE_AND_PROGRAMMING option
39 + * Calculates if dc_state can be supported on the input display
40 + * configuration. If supported, generates the necessary HW
41 + * programming for the new dc_state.
42 + *
43 + * -dml21_check_mode_support - for DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX option
44 + * Calculates if dc_state can be supported for the input display
45 + * config.
46 + *
47 + * Context: Two threads may not invoke this function concurrently unless they reference
48 + * separate dc_states for validation.
49 + * Return: True if mode is supported, false otherwise.
50 + */
51 + bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx,
52 + enum dc_validate_mode validate_mode);
53 +
54 + void dml21_reinit(const struct dc *in_dc, struct dml2_context *dml_ctx,
55 + const struct dml2_configuration_options *config);
56 +
57 + /* Prepare hubp mcache_regs for hubp mcache ID and split coordinate programming */
58 + void dml21_prepare_mcache_programming(struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx);
59 +
60 + #endif /* _DML21_WRAPPER_FPU_H_ */
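Since DC_FP_START()/DC_FP_END() no longer live inside the moved routines (the deleted dml21_init() wrapped dml2_initialize_instance() with them), the header's "must be called with FPU protection" note shifts that duty to callers. A hedged sketch of the expected call shape; the wrapper function here is invented for illustration and is not part of the patch:

    /* Illustrative caller: the FPU guard now sits at the call site. */
    static void example_init_with_fpu(const struct dc *in_dc,
                                      struct dml2_context *dml_ctx,
                                      const struct dml2_configuration_options *config)
    {
        DC_FP_START();   /* enter kernel FPU context */
        dml21_init(in_dc, dml_ctx, config);
        DC_FP_END();     /* leave it before returning to non-FPU code */
    }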
+20
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
··· 279 279 bool result = false; 280 280 int index = 0; 281 281 282 + /* Guard against empty clock tables (e.g. DTBCLK on DCN42B where the 283 + * clock is tied off and num_clk_values == 0). Without this check the 284 + * else-if branch below would evaluate 285 + * clk_values_khz[num_clk_values - 1] with num_clk_values == 0, which 286 + * wraps the unsigned char index to 255 — a 235-element out-of-bounds 287 + * read on an array of DML_MAX_CLK_TABLE_SIZE (20) entries. 288 + * 289 + * Semantic: if the clock doesn't exist on this ASIC but no frequency 290 + * is required (min_value == 0), the request is trivially satisfied. 291 + * If a non-zero frequency is required but the clock is absent, the 292 + * configuration is unsupportable. 293 + */ 294 + if (clock_table->num_clk_values == 0) { 295 + if (min_value == 0) { 296 + *rounded_value = 0; 297 + return true; 298 + } 299 + return false; 300 + } 301 + 282 302 if (clock_table->num_clk_values > 2) { 283 303 while (index < clock_table->num_clk_values && clock_table->clk_values_khz[index] < min_value) 284 304 index++;
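The wrap the new comment describes is easy to reproduce in isolation. A minimal sketch, assuming the count field is an unsigned char as the comment states:

    #include <stdio.h>

    int main(void)
    {
        unsigned char num_clk_values = 0;

        /* num_clk_values - 1 promotes to int (-1); storing it back into an
         * unsigned char index wraps to 255, far past a 20-entry table. */
        unsigned char last_index = num_clk_values - 1;

        printf("last_index = %u\n", last_index);   /* prints 255 */
        return 0;
    }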
+6 -3
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
··· 178 178 179 179 min_table->max_clocks_khz.dispclk = soc_bb->clk_table.dispclk.clk_values_khz[soc_bb->clk_table.dispclk.num_clk_values - 1]; 180 180 min_table->max_clocks_khz.dppclk = soc_bb->clk_table.dppclk.clk_values_khz[soc_bb->clk_table.dppclk.num_clk_values - 1]; 181 - min_table->max_clocks_khz.dscclk = soc_bb->clk_table.dscclk.clk_values_khz[soc_bb->clk_table.dscclk.num_clk_values - 1]; 182 - min_table->max_clocks_khz.dtbclk = soc_bb->clk_table.dtbclk.clk_values_khz[soc_bb->clk_table.dtbclk.num_clk_values - 1]; 183 - min_table->max_clocks_khz.phyclk = soc_bb->clk_table.phyclk.clk_values_khz[soc_bb->clk_table.phyclk.num_clk_values - 1]; 181 + min_table->max_clocks_khz.dscclk = (soc_bb->clk_table.dscclk.num_clk_values > 0) ? 182 + soc_bb->clk_table.dscclk.clk_values_khz[soc_bb->clk_table.dscclk.num_clk_values - 1] : 0; 183 + min_table->max_clocks_khz.dtbclk = (soc_bb->clk_table.dtbclk.num_clk_values > 0) ? 184 + soc_bb->clk_table.dtbclk.clk_values_khz[soc_bb->clk_table.dtbclk.num_clk_values - 1] : 0; 185 + min_table->max_clocks_khz.phyclk = (soc_bb->clk_table.phyclk.num_clk_values > 0) ? 186 + soc_bb->clk_table.phyclk.clk_values_khz[soc_bb->clk_table.phyclk.num_clk_values - 1] : 0; 184 187 185 188 min_table->max_ss_clocks_khz.dispclk = (unsigned int)((double)min_table->max_clocks_khz.dispclk / (1.0 + soc_bb->dcn_downspread_percent / 100.0)); 186 189 min_table->max_ss_clocks_khz.dppclk = (unsigned int)((double)min_table->max_clocks_khz.dppclk / (1.0 + soc_bb->dcn_downspread_percent / 100.0));
+6 -3
drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn42.c
··· 54 54 55 55 min_table->max_clocks_khz.dispclk = soc_bb->clk_table.dispclk.clk_values_khz[soc_bb->clk_table.dispclk.num_clk_values - 1]; 56 56 min_table->max_clocks_khz.dppclk = soc_bb->clk_table.dppclk.clk_values_khz[soc_bb->clk_table.dppclk.num_clk_values - 1]; 57 - min_table->max_clocks_khz.dscclk = soc_bb->clk_table.dscclk.clk_values_khz[soc_bb->clk_table.dscclk.num_clk_values - 1]; 58 - min_table->max_clocks_khz.dtbclk = soc_bb->clk_table.dtbclk.clk_values_khz[soc_bb->clk_table.dtbclk.num_clk_values - 1]; 59 - min_table->max_clocks_khz.phyclk = soc_bb->clk_table.phyclk.clk_values_khz[soc_bb->clk_table.phyclk.num_clk_values - 1]; 57 + min_table->max_clocks_khz.dscclk = (soc_bb->clk_table.dscclk.num_clk_values > 0) ? 58 + soc_bb->clk_table.dscclk.clk_values_khz[soc_bb->clk_table.dscclk.num_clk_values - 1] : 0; 59 + min_table->max_clocks_khz.dtbclk = (soc_bb->clk_table.dtbclk.num_clk_values > 0) ? 60 + soc_bb->clk_table.dtbclk.clk_values_khz[soc_bb->clk_table.dtbclk.num_clk_values - 1] : 0; 61 + min_table->max_clocks_khz.phyclk = (soc_bb->clk_table.phyclk.num_clk_values > 0) ? 62 + soc_bb->clk_table.phyclk.clk_values_khz[soc_bb->clk_table.phyclk.num_clk_values - 1] : 0; 60 63 61 64 min_table->max_ss_clocks_khz.dispclk = (unsigned int)((double)min_table->max_clocks_khz.dispclk / (1.0 + soc_bb->dcn_downspread_percent / 100.0)); 62 65 min_table->max_ss_clocks_khz.dppclk = (unsigned int)((double)min_table->max_clocks_khz.dppclk / (1.0 + soc_bb->dcn_downspread_percent / 100.0));
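The same guarded last-element read is repeated per clock in both the DCN4 (above) and DCN42 variants. A hedged sketch of how the pattern could be factored into a single helper; clk_table_max_khz is a hypothetical name, not something the driver defines:

    #include <stdio.h>

    struct clk_levels {
        unsigned int clk_values_khz[20];
        unsigned char num_clk_values;
    };

    /* Highest populated level, or 0 when the clock is absent on this ASIC. */
    static unsigned int clk_table_max_khz(const struct clk_levels *clk)
    {
        return clk->num_clk_values > 0 ?
            clk->clk_values_khz[clk->num_clk_values - 1] : 0;
    }

    int main(void)
    {
        struct clk_levels dscclk = { .clk_values_khz = { 400000, 800000 },
                                     .num_clk_values = 2 };
        struct clk_levels dtbclk = { .num_clk_values = 0 };

        printf("dscclk max: %u\n", clk_table_max_khz(&dscclk)); /* 800000 */
        printf("dtbclk max: %u\n", clk_table_max_khz(&dtbclk)); /* 0 */
        return 0;
    }

Keeping the open-coded ternaries matches the surrounding style and keeps the diff small; a shared helper would be the alternative if more clocks become optional.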
+17 -11
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
··· 35 35 #define MAX_MPCC_FACTOR 4 36 36 37 37 struct dc_plane_pipe_pool { 38 - int pipes_assigned_to_plane[MAX_ODM_FACTOR][MAX_MPCC_FACTOR]; 38 + unsigned int pipes_assigned_to_plane[MAX_ODM_FACTOR][MAX_MPCC_FACTOR]; 39 39 bool pipe_used[MAX_ODM_FACTOR][MAX_MPCC_FACTOR]; 40 40 int num_pipes_assigned_to_plane_for_mpcc_combine; 41 41 int num_pipes_assigned_to_plane_for_odm_combine; ··· 340 340 static bool find_more_pipes_for_stream(struct dml2_context *ctx, 341 341 struct dc_state *state, // The state we want to find a free mapping in 342 342 unsigned int stream_id, // The stream we want this pipe to drive 343 - int *assigned_pipes, 344 - int *assigned_pipe_count, 343 + unsigned int *assigned_pipes, 344 + unsigned int *assigned_pipe_count, 345 345 int pipes_needed, 346 346 const struct dc_state *existing_state) // The state (optional) that we want to minimize remapping relative to 347 347 { ··· 366 366 if (!is_plane_using_pipe(pipe)) { 367 367 pipes_needed--; 368 368 // TODO: This doens't make sense really, pipe_idx should always be valid 369 - pipe->pipe_idx = preferred_pipe_candidates[i]; 369 + ASSERT(preferred_pipe_candidates[i] <= 0xFF); 370 + pipe->pipe_idx = (uint8_t)preferred_pipe_candidates[i]; 370 371 assigned_pipes[(*assigned_pipe_count)++] = pipe->pipe_idx; 371 372 } 372 373 } ··· 383 382 if (!is_plane_using_pipe(pipe)) { 384 383 pipes_needed--; 385 384 // TODO: This doens't make sense really, pipe_idx should always be valid 386 - pipe->pipe_idx = i; 385 + ASSERT(i >= 0 && i <= 0xFF); 386 + pipe->pipe_idx = (uint8_t)i; 387 387 assigned_pipes[(*assigned_pipe_count)++] = pipe->pipe_idx; 388 388 } 389 389 } ··· 395 393 if (!is_plane_using_pipe(pipe)) { 396 394 pipes_needed--; 397 395 // TODO: This doens't make sense really, pipe_idx should always be valid 398 - pipe->pipe_idx = last_resort_pipe_candidates[i]; 396 + ASSERT(last_resort_pipe_candidates[i] <= 0xFF); 397 + pipe->pipe_idx = (uint8_t)last_resort_pipe_candidates[i]; 399 398 assigned_pipes[(*assigned_pipe_count)++] = pipe->pipe_idx; 400 399 } 401 400 } ··· 409 406 static bool find_more_free_pipes(struct dml2_context *ctx, 410 407 struct dc_state *state, // The state we want to find a free mapping in 411 408 unsigned int stream_id, // The stream we want this pipe to drive 412 - int *assigned_pipes, 413 - int *assigned_pipe_count, 409 + unsigned int *assigned_pipes, 410 + unsigned int *assigned_pipe_count, 414 411 int pipes_needed, 415 412 const struct dc_state *existing_state) // The state (optional) that we want to minimize remapping relative to 416 413 { ··· 435 432 if (is_pipe_free(pipe)) { 436 433 pipes_needed--; 437 434 // TODO: This doens't make sense really, pipe_idx should always be valid 438 - pipe->pipe_idx = preferred_pipe_candidates[i]; 435 + ASSERT(preferred_pipe_candidates[i] <= 0xFF); 436 + pipe->pipe_idx = (uint8_t)preferred_pipe_candidates[i]; 439 437 assigned_pipes[(*assigned_pipe_count)++] = pipe->pipe_idx; 440 438 } 441 439 } ··· 452 448 if (is_pipe_free(pipe)) { 453 449 pipes_needed--; 454 450 // TODO: This doens't make sense really, pipe_idx should always be valid 455 - pipe->pipe_idx = i; 451 + ASSERT(i >= 0 && i <= 0xFF); 452 + pipe->pipe_idx = (uint8_t)i; 456 453 assigned_pipes[(*assigned_pipe_count)++] = pipe->pipe_idx; 457 454 } 458 455 } ··· 464 459 if (is_pipe_free(pipe)) { 465 460 pipes_needed--; 466 461 // TODO: This doens't make sense really, pipe_idx should always be valid 467 - pipe->pipe_idx = last_resort_pipe_candidates[i]; 462 + ASSERT(last_resort_pipe_candidates[i] <= 0xFF); 463 + 
pipe->pipe_idx = (uint8_t)last_resort_pipe_candidates[i]; 468 464 assigned_pipes[(*assigned_pipe_count)++] = pipe->pipe_idx; 469 465 } 470 466 }
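The pipe_idx stores here follow an assert-then-cast narrowing pattern: the candidate index is known to fit in a u8, the ASSERT documents that invariant, and the explicit cast silences the implicit-truncation warning. A standalone sketch of the idiom, using assert() in place of DC's ASSERT macro (which can compile out in release builds, so the cast must still be safe on its own):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Narrow an index to u8, asserting it fits; mirrors the
     * ASSERT-then-cast pattern used for pipe->pipe_idx above. */
    static uint8_t narrow_to_u8(unsigned int v)
    {
        assert(v <= 0xFF);
        return (uint8_t)v;
    }

    int main(void)
    {
        unsigned int candidates[] = { 0, 3, 5 };
        uint8_t pipe_idx;
        size_t i;

        for (i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++) {
            pipe_idx = narrow_to_u8(candidates[i]);
            printf("assigned pipe %u\n", (unsigned int)pipe_idx);
        }
        return 0;
    }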
+2 -1
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c
··· 555 555 556 556 if (!found && pipe_mall_type == SUBVP_NONE) { 557 557 // Found pipe which is not SubVP or Phantom (i.e. the VBLANK pipe). 558 - vblank_index = i; 558 + ASSERT(i <= 0xFF); 559 + vblank_index = (uint8_t)i; 559 560 found = true; 560 561 } 561 562
+3 -3
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c
··· 330 330 { 331 331 struct dml2_policy_build_synthetic_soc_states_scratch *s = &dml2->v20.scratch.create_scratch.build_synthetic_socbb_scratch; 332 332 struct dml2_policy_build_synthetic_soc_states_params *p = &dml2->v20.scratch.build_synthetic_socbb_params; 333 - unsigned int dcfclk_stas_mhz[NUM_DCFCLK_STAS] = {0}; 334 - unsigned int dcfclk_stas_mhz_new[NUM_DCFCLK_STAS_NEW] = {0}; 333 + int dcfclk_stas_mhz[NUM_DCFCLK_STAS] = {0}; 334 + int dcfclk_stas_mhz_new[NUM_DCFCLK_STAS_NEW] = {0}; 335 335 unsigned int dml_project = dml2->v20.dml_core_ctx.project; 336 336 337 337 unsigned int i = 0; ··· 765 765 out->PixelClock[location] *= 2; 766 766 out->HTotal[location] = in->timing.h_total; 767 767 out->VTotal[location] = in->timing.v_total; 768 - out->Interlace[location] = in->timing.flags.INTERLACE; 768 + out->Interlace[location] = (in->timing.flags.INTERLACE != 0); 769 769 hblank_start = in->timing.h_total - in->timing.h_front_porch; 770 770 out->HBlankEnd[location] = hblank_start 771 771 - in->timing.h_addressable
+2 -1
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.c
··· 255 255 pipe_ctx->pipe_dlg_param.vupdate_width = dml_get_vupdate_width(mode_lib, pipe_idx); 256 256 pipe_ctx->pipe_dlg_param.vready_offset = dml_get_vready_offset(mode_lib, pipe_idx); 257 257 258 - pipe_ctx->pipe_dlg_param.otg_inst = pipe_ctx->stream_res.tg->inst; 258 + ASSERT(pipe_ctx->stream_res.tg->inst >= 0 && pipe_ctx->stream_res.tg->inst <= 0xFF); 259 + pipe_ctx->pipe_dlg_param.otg_inst = (unsigned char)pipe_ctx->stream_res.tg->inst; 259 260 260 261 pipe_ctx->pipe_dlg_param.hactive = hactive; 261 262 pipe_ctx->pipe_dlg_param.vactive = vactive;
+16 -7
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c
··· 6 6 */ 7 7 8 8 #include "dml2_internal_types.h" 9 + #include "dml2_wrapper.h" 9 10 #include "dml2_wrapper_fpu.h" 11 + #include "dml21_wrapper.h" 12 + #include "dml21_wrapper_fpu.h" 10 13 14 + #include "dc_fpu.h" 15 + 16 + #if !defined(DC_RUN_WITH_PREEMPTION_ENABLED) 17 + #define DC_RUN_WITH_PREEMPTION_ENABLED(code) code 18 + #endif // !DC_RUN_WITH_PREEMPTION_ENABLED 19 + 20 + struct dml2_context *dml2_allocate_memory(void) 21 + { 22 + struct dml2_context *dml2; 23 + 24 + DC_RUN_WITH_PREEMPTION_ENABLED(dml2 = vzalloc(sizeof(struct dml2_context))); 25 + return dml2; 26 + } 11 27 bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2, 12 28 enum dc_validate_mode validate_mode) 13 29 { ··· 39 23 return out; 40 24 } 41 25 42 - DC_FP_START(); 43 - 44 26 /* Use dml_validate_only for DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX path */ 45 27 if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) 46 28 out = dml2_validate_only(context, validate_mode); 47 29 else 48 30 out = dml2_validate_and_build_resource(in_dc, context, validate_mode); 49 - 50 - DC_FP_END(); 51 31 52 32 return out; 53 33 } ··· 82 70 break; 83 71 } 84 72 85 - DC_FP_START(); 86 - 87 73 initialize_dml2_ip_params(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.ip); 88 74 89 75 initialize_dml2_soc_bbox(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc); 90 76 91 77 initialize_dml2_soc_states(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc, &(*dml2)->v20.dml_core_ctx.states); 92 78 93 - DC_FP_END(); 94 79 } 95 80 96 81 bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
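The DC_FP_START/DC_FP_END pairs disappear from this file because FPU-protected regions run with preemption disabled and must not sleep, while vzalloc() can sleep; the allocation therefore happens under DC_RUN_WITH_PREEMPTION_ENABLED, and the FP bracketing presumably moves into the *_fpu.c helpers this file now includes. A userspace sketch of the ordering this enforces; the dc_fp_* functions below are printf stand-ins for the kernel macros, not their real implementations:

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-ins for DC_FP_START()/DC_FP_END(): between them the kernel
     * saves FPU state and disables preemption, so nothing inside may
     * sleep. Sleeping allocations (vzalloc) stay outside. */
    static void dc_fp_start(void) { printf("FPU saved, preemption off\n"); }
    static void dc_fp_end(void)   { printf("FPU restored, preemption on\n"); }

    struct dml2_context { double scratch[4]; };

    static struct dml2_context *dml2_allocate_memory(void)
    {
        /* May sleep: must happen with preemption enabled. */
        return calloc(1, sizeof(struct dml2_context));
    }

    static void initialize_soc_params(struct dml2_context *ctx)
    {
        /* FP math belongs inside the protected region. */
        ctx->scratch[0] = 1.0 / 3.0;
    }

    int main(void)
    {
        struct dml2_context *ctx = dml2_allocate_memory();

        if (!ctx)
            return 1;
        dc_fp_start();
        initialize_soc_params(ctx);
        dc_fp_end();
        free(ctx);
        return 0;
    }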
+3 -6
drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper_fpu.c
··· 31 31 #include "dml2_translation_helper.h" 32 32 #include "dml2_mall_phantom.h" 33 33 #include "dml2_dc_resource_mgmt.h" 34 - #include "dml21_wrapper.h" 34 + #include "dml2_wrapper.h" 35 35 #include "dml2_wrapper_fpu.h" 36 + #include "dml21_wrapper.h" 37 + #include "dml21_wrapper_fpu.h" 36 38 37 39 void initialize_dml2_ip_params(struct dml2_context *dml2, const struct dc *in_dc, struct ip_params_st *out) 38 40 { ··· 546 544 if (dc->debug.override_odm_optimization) { 547 545 dml2->config.minimize_dispclk_using_odm = dc->debug.minimize_dispclk_using_odm; 548 546 } 549 - } 550 - 551 - inline struct dml2_context *dml2_allocate_memory(void) 552 - { 553 - return (struct dml2_context *) vzalloc(sizeof(struct dml2_context)); 554 547 } 555 548 556 549 void dml2_destroy(struct dml2_context *dml2)
+1
drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
··· 288 288 enum dc_color_space input_color_space, 289 289 struct cnv_alpha_2bit_lut *alpha_2bit_lut) 290 290 { 291 + (void)alpha_2bit_lut; 291 292 uint32_t pixel_format; 292 293 uint32_t alpha_en; 293 294 enum pixel_format_description fmt ;
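This hunk and many below add (void) casts to silence unused-parameter warnings for callbacks that must keep a shared function-pointer signature. The idiom in isolation:

    #include <stdio.h>

    /* Callback table: every implementation must take both arguments
     * even if a given hardware generation ignores one of them. */
    struct ops {
        void (*set_mode)(int inst, int unused_cfg);
    };

    static void set_mode_gen1(int inst, int unused_cfg)
    {
        (void)unused_cfg; /* unused on this generation; silence -Wunused-parameter */
        printf("gen1: inst %d\n", inst);
    }

    int main(void)
    {
        struct ops o = { .set_mode = set_mode_gen1 };

        o.set_mode(0, 42);
        return 0;
    }

One caveat: several of these hunks place the (void) statement above the local declarations, which mixes statements and declarations and will warn if the build enables -Wdeclaration-after-statement; putting the casts after the declarations avoids that.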
+9 -2
drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.c
··· 92 92 void dpp2_dummy_program_input_lut( 93 93 struct dpp *dpp_base, 94 94 const struct dc_gamma *gamma) 95 - {} 95 + { 96 + (void)dpp_base; 97 + (void)gamma; 98 + } 96 99 97 100 static void dpp2_cnv_setup ( 98 101 struct dpp *dpp_base, ··· 372 369 struct dpp *dpp, 373 370 const struct pwl_params *params, 374 371 enum opp_regamma mode) 375 - {} 372 + { 373 + (void)dpp; 374 + (void)params; 375 + (void)mode; 376 + } 376 377 377 378 static struct dpp_funcs dcn20_dpp_funcs = { 378 379 .dpp_read_state = dpp20_read_state,
+1
drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c
··· 1016 1016 bool is_color_channel_12bits, 1017 1017 bool is_lut_size17x17x17) 1018 1018 { 1019 + (void)is_color_channel_12bits; 1019 1020 uint32_t lut_mode; 1020 1021 struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); 1021 1022
+1
drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
··· 1307 1307 bool is_color_channel_12bits, 1308 1308 bool is_lut_size17x17x17) 1309 1309 { 1310 + (void)is_color_channel_12bits; 1310 1311 uint32_t lut_mode; 1311 1312 struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); 1312 1313
+1
drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c
··· 80 80 uint32_t num, 81 81 bool is_ram_a) 82 82 { 83 + (void)is_ram_a; 83 84 uint32_t i; 84 85 struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); 85 86 uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg;
+5
drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
··· 132 132 uint32_t width, 133 133 uint32_t height) 134 134 { 135 + (void)param; 136 + (void)width; 137 + (void)height; 135 138 struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base); 136 139 uint32_t cur_en = pos->enable ? 1 : 0; 137 140 ··· 240 237 enum dc_color_space color_space, 241 238 struct dc_csc_transform cursor_csc_color_matrix) 242 239 { 240 + (void)color_space; 241 + (void)cursor_csc_color_matrix; 243 242 //Since we don't have cursor matrix information, force bypass mode by passing in unknown color space 244 243 dpp401_program_cursor_csc(dpp_base, COLOR_SPACE_UNKNOWN, NULL); 245 244 }
+7 -6
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
··· 680 680 } else { 681 681 build_dsc_enc_caps(dsc, dsc_enc_caps); 682 682 } 683 + 684 + if (dsc->ctx->dc->debug.native422_support) 685 + dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1; 683 686 } 684 687 685 688 /* Returns 'false' if no intersection was found for at least one capability. ··· 1100 1097 branch_max_throughput_mps = dsc_sink_caps->branch_overall_throughput_0_mps; 1101 1098 break; 1102 1099 case PIXEL_ENCODING_YCBCR422: 1103 - if (policy.ycbcr422_simple) { 1100 + is_dsc_possible = (bool)dsc_common_caps.color_formats.bits.YCBCR_NATIVE_422; 1101 + sink_per_slice_throughput_mps = dsc_sink_caps->throughput_mode_1_mps; 1102 + branch_max_throughput_mps = dsc_sink_caps->branch_overall_throughput_1_mps; 1103 + if (!is_dsc_possible) { 1104 1104 is_dsc_possible = (bool)dsc_common_caps.color_formats.bits.YCBCR_SIMPLE_422; 1105 1105 dsc_cfg->ycbcr422_simple = is_dsc_possible; 1106 1106 sink_per_slice_throughput_mps = dsc_sink_caps->throughput_mode_0_mps; 1107 - } else { 1108 - is_dsc_possible = (bool)dsc_common_caps.color_formats.bits.YCBCR_NATIVE_422; 1109 - sink_per_slice_throughput_mps = dsc_sink_caps->throughput_mode_1_mps; 1110 - branch_max_throughput_mps = dsc_sink_caps->branch_overall_throughput_1_mps; 1111 1107 } 1112 1108 break; 1113 1109 case PIXEL_ENCODING_YCBCR420: ··· 1406 1404 policy->min_target_bpp = 8; 1407 1405 /* DP specs limits to 3 x bpc */ 1408 1406 policy->max_target_bpp = 3 * bpc; 1409 - policy->ycbcr422_simple = true; 1410 1407 break; 1411 1408 case PIXEL_ENCODING_YCBCR420: 1412 1409 /* DP specs limits to 6 */
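With DCN2.0, DCN3.5, and DCN4.0.1 no longer advertising native 4:2:2 (hunks below) and the native cap now gated behind the debug.native422_support override, the selection above tries native 4:2:2 first and falls back to simple 4:2:2. A reduced model of that decision; struct and function names are illustrative, and the throughput bookkeeping is omitted:

    #include <stdbool.h>
    #include <stdio.h>

    struct caps422 {
        bool native_422;
        bool simple_422;
    };

    /* Mirrors the reworked selection: prefer native 4:2:2 when the
     * intersected encoder/sink caps allow it, else fall back to the
     * simple 4:2:2 path. */
    static bool pick_422_mode(const struct caps422 *common, bool *use_simple)
    {
        if (common->native_422) {
            *use_simple = false;
            return true;
        }
        if (common->simple_422) {
            *use_simple = true;
            return true;
        }
        return false; /* DSC not possible in 4:2:2 */
    }

    int main(void)
    {
        struct caps422 caps = { .native_422 = false, .simple_422 = true };
        bool simple;

        if (pick_422_mode(&caps, &simple))
            printf("YCbCr 4:2:2 via %s path\n", simple ? "simple" : "native");
        return 0;
    }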
+1 -1
drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
··· 100 100 dsc_enc_caps->color_formats.bits.RGB = 1; 101 101 dsc_enc_caps->color_formats.bits.YCBCR_444 = 1; 102 102 dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1; 103 - dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1; 103 + dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0; 104 104 dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1; 105 105 106 106 dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;
+1 -1
drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c
··· 128 128 dsc_enc_caps->color_formats.bits.RGB = 1; 129 129 dsc_enc_caps->color_formats.bits.YCBCR_444 = 1; 130 130 dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1; 131 - dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1; 131 + dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0; 132 132 dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1; 133 133 134 134 dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;
+6 -1
drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
··· 78 78 dsc_enc_caps->color_formats.bits.RGB = 1; 79 79 dsc_enc_caps->color_formats.bits.YCBCR_444 = 1; 80 80 dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1; 81 - dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1; 81 + dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0; 82 82 dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1; 83 83 84 84 dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1; ··· 107 107 REG_GET(DSCC_PPS_CONFIG7, SLICE_BPG_OFFSET, &s->dsc_slice_bpg_offset); 108 108 REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &s->dsc_fw_en, 109 109 DSCRM_DSC_OPP_PIPE_SOURCE, &s->dsc_opp_source); 110 + REG_GET(DSCC_PPS_CONFIG1, BLOCK_PRED_ENABLE, &s->dsc_block_pred_enable); 111 + REG_GET(DSCC_PPS_CONFIG0, LINEBUF_DEPTH, &s->dsc_line_buf_depth); 112 + REG_GET(DSCC_PPS_CONFIG0, DSC_VERSION_MINOR, &s->dsc_version_minor); 113 + REG_GET(DSCC_CONFIG1, DSCC_RATE_CONTROL_BUFFER_MODEL_SIZE, &s->dsc_rc_buffer_size); 114 + REG_GET(DSCC_PPS_CONFIG0, SIMPLE_422, &s->dsc_simple_422); 110 115 } 111 116 112 117
+5
drivers/gpu/drm/amd/display/dc/dsc/dsc.h
··· 64 64 uint32_t dsc_chunk_size; 65 65 uint32_t dsc_fw_en; 66 66 uint32_t dsc_opp_source; 67 + uint32_t dsc_block_pred_enable; 68 + uint32_t dsc_line_buf_depth; 69 + uint32_t dsc_version_minor; 70 + uint32_t dsc_rc_buffer_size; 71 + uint32_t dsc_simple_422; 67 72 }; 68 73 69 74 struct dcn_dsc_reg_state {
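These fields are filled by the new REG_GET reads in the dcn401 hunk above so state dumps can include PPS details. A toy model of the capture pattern, with reg_read() standing in for REG_GET and entirely made-up bit layouts (the real field positions come from the register spec):

    #include <stdint.h>
    #include <stdio.h>

    struct dsc_state {
        uint32_t block_pred_enable;
        uint32_t line_buf_depth;
        uint32_t version_minor;
    };

    /* Stand-in for REG_GET(): extract one field from a register value. */
    static uint32_t reg_read(uint32_t reg, uint32_t mask, int shift)
    {
        return (reg & mask) >> shift;
    }

    static void dsc_read_state(uint32_t pps_config0, uint32_t pps_config1,
                               struct dsc_state *s)
    {
        s->block_pred_enable = reg_read(pps_config1, 0x1, 0);
        s->line_buf_depth = reg_read(pps_config0, 0xF0, 4);
        s->version_minor = reg_read(pps_config0, 0xF, 0);
    }

    int main(void)
    {
        struct dsc_state s;

        dsc_read_state(0x92, 0x1, &s);
        printf("bp=%u depth=%u ver=1.%u\n",
               s.block_pred_enable, s.line_buf_depth, s.version_minor);
        return 0;
    }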
+1
drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.c
··· 45 45 46 46 static bool dwb3_get_caps(struct dwbc *dwbc, struct dwb_caps *caps) 47 47 { 48 + (void)dwbc; 48 49 if (caps) { 49 50 caps->adapter_id = 0; /* we only support 1 adapter currently */ 50 51 caps->hw_version = DCN_VERSION_3_0;
+1
drivers/gpu/drm/amd/display/dc/gpio/dcn42/hw_translate_dcn42.c
··· 45 45 enum gpio_id *id, 46 46 uint32_t *en) 47 47 { 48 + (void)mask; 48 49 switch (offset) { 49 50 /* HPD */ 50 51 case REG(HPD0_DC_HPD_INT_STATUS):
+1
drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
··· 60 60 enum dce_version dce_version, 61 61 enum dce_environment dce_environment) 62 62 { 63 + (void)dce_environment; 63 64 switch (dce_version) { 64 65 #if defined(CONFIG_DRM_AMD_DC_SI) 65 66 case DCE_VERSION_6_0:
+1
drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.c
··· 199 199 void dal_hw_gpio_destruct( 200 200 struct hw_gpio *pin) 201 201 { 202 + (void)pin; 202 203 ASSERT(!pin->base.opened); 203 204 }
+1
drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
··· 64 64 enum dce_version dce_version, 65 65 enum dce_environment dce_environment) 66 66 { 67 + (void)dce_environment; 67 68 switch (dce_version) { 68 69 #if defined(CONFIG_DRM_AMD_DC_SI) 69 70 case DCE_VERSION_6_0:
+1
drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
··· 943 943 unsigned int dccg_ref_freq_inKhz, 944 944 unsigned int *dchub_ref_freq_inKhz) 945 945 { 946 + (void)dccg_ref_freq_inKhz; 946 947 struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); 947 948 uint32_t ref_div = 0; 948 949 uint32_t ref_en = 0;
+1
drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
··· 259 259 unsigned int dccg_ref_freq_inKhz, 260 260 unsigned int *dchub_ref_freq_inKhz) 261 261 { 262 + (void)dccg_ref_freq_inKhz; 262 263 struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); 263 264 uint32_t ref_div = 0; 264 265 uint32_t ref_en = 0;
+4
drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
··· 70 70 unsigned int refclk_mhz, 71 71 bool safe_to_lower) 72 72 { 73 + (void)refclk_mhz; 73 74 struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); 74 75 bool wm_pending = false; 75 76 ··· 189 188 unsigned int refclk_mhz, 190 189 bool safe_to_lower) 191 190 { 191 + (void)refclk_mhz; 192 192 struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); 193 193 bool wm_pending = false; 194 194 ··· 289 287 unsigned int refclk_mhz, 290 288 bool safe_to_lower) 291 289 { 290 + (void)refclk_mhz; 292 291 struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); 293 292 bool wm_pending = false; 294 293 ··· 417 414 unsigned int refclk_mhz, 418 415 bool safe_to_lower) 419 416 { 417 + (void)refclk_mhz; 420 418 struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); 421 419 bool wm_pending = false; 422 420
+2
drivers/gpu/drm/amd/display/dc/hubbub/dcn42/dcn42_hubbub.c
··· 488 488 489 489 static void hubbub42_set_request_limit(struct hubbub *hubbub, int memory_channel_count, int words_per_channel) 490 490 { 491 + (void)memory_channel_count; 492 + (void)words_per_channel; 491 493 struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub); 492 494 uint32_t request_limit = 96; //MAX(12 * memory_channel_count, 96); 493 495
+2
drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
··· 143 143 const struct dc_tiling_info *info, 144 144 const enum surface_pixel_format pixel_format) 145 145 { 146 + (void)pixel_format; 146 147 struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); 147 148 148 149 REG_UPDATE_6(DCSURF_ADDR_CONFIG, ··· 564 563 bool horizontal_mirror, 565 564 unsigned int compat_level) 566 565 { 566 + (void)compat_level; 567 567 hubp1_dcc_control(hubp, dcc->enable, dcc->independent_64b_blks); 568 568 hubp1_program_tiling(hubp, tiling_info, format); 569 569 hubp1_program_size(hubp, format, plane_size, dcc);
+2
drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
··· 313 313 const struct dc_tiling_info *info, 314 314 const enum surface_pixel_format pixel_format) 315 315 { 316 + (void)pixel_format; 316 317 REG_UPDATE_3(DCSURF_ADDR_CONFIG, 317 318 NUM_PIPES, log_2(info->gfx9.num_pipes), 318 319 PIPE_INTERLEAVE, info->gfx9.pipe_interleave, ··· 558 557 bool horizontal_mirror, 559 558 unsigned int compat_level) 560 559 { 560 + (void)compat_level; 561 561 struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); 562 562 563 563 hubp2_dcc_control(hubp, dcc->enable, dcc->independent_64b_blks);
+2
drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
··· 321 321 const struct dc_tiling_info *info, 322 322 const enum surface_pixel_format pixel_format) 323 323 { 324 + (void)pixel_format; 324 325 REG_UPDATE_4(DCSURF_ADDR_CONFIG, 325 326 NUM_PIPES, log_2(info->gfx9.num_pipes), 326 327 PIPE_INTERLEAVE, info->gfx9.pipe_interleave, ··· 419 418 bool horizontal_mirror, 420 419 unsigned int compat_level) 421 420 { 421 + (void)compat_level; 422 422 struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); 423 423 424 424 hubp3_dcc_control_sienna_cichlid(hubp, dcc);
+1
drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
··· 179 179 bool horizontal_mirror, 180 180 unsigned int compat_level) 181 181 { 182 + (void)compat_level; 182 183 struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); 183 184 184 185 hubp3_dcc_control_sienna_cichlid(hubp, dcc);
+3
drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
··· 657 657 const struct dc_tiling_info *info, 658 658 const enum surface_pixel_format pixel_format) 659 659 { 660 + (void)pixel_format; 660 661 /* DCSURF_ADDR_CONFIG still shows up in reg spec, but does not need to be programmed for DCN4x 661 662 * All 4 fields NUM_PIPES, PIPE_INTERLEAVE, MAX_COMPRESSED_FRAGS and NUM_PKRS are irrelevant. 662 663 * ··· 672 671 const struct plane_size *plane_size, 673 672 struct dc_plane_dcc_param *dcc) 674 673 { 674 + (void)dcc; 675 675 struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); 676 676 uint32_t pitch, pitch_c; 677 677 bool use_pitch_c = false; ··· 711 709 bool horizontal_mirror, 712 710 unsigned int compat_level) 713 711 { 712 + (void)compat_level; 714 713 struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); 715 714 716 715 hubp401_dcc_control(hubp, dcc);
+1
drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.c
··· 301 301 bool horizontal_mirror, 302 302 unsigned int compat_level) 303 303 { 304 + (void)compat_level; 304 305 struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); 305 306 306 307 hubp3_dcc_control_sienna_cichlid(hubp, dcc);
+31 -9
drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
··· 201 201 struct dc_context *ctx, 202 202 bool clock_gating) 203 203 { 204 + (void)ctx; 205 + (void)clock_gating; 204 206 /*TODO*/ 205 207 } 206 208 ··· 286 284 dce110_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, 287 285 const struct dc_plane_state *plane_state) 288 286 { 287 + (void)dc; 289 288 struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp; 290 289 const struct dc_transfer_func *tf = NULL; 291 290 struct ipp_prescale_params prescale_params = { 0 }; ··· 491 488 seg_distr[8] = 4; 492 489 seg_distr[9] = 4; 493 490 seg_distr[10] = 0; 494 - seg_distr[11] = -1; 495 - seg_distr[12] = -1; 496 - seg_distr[13] = -1; 497 - seg_distr[14] = -1; 498 - seg_distr[15] = -1; 491 + seg_distr[11] = (uint32_t)-1; 492 + seg_distr[12] = (uint32_t)-1; 493 + seg_distr[13] = (uint32_t)-1; 494 + seg_distr[14] = (uint32_t)-1; 495 + seg_distr[15] = (uint32_t)-1; 499 496 } 500 497 501 498 for (k = 0; k < 16; k++) { 502 - if (seg_distr[k] != -1) 499 + if (seg_distr[k] != (uint32_t)-1) 503 500 hw_points += (1 << seg_distr[k]); 504 501 } 505 502 ··· 610 607 dce110_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, 611 608 const struct dc_stream_state *stream) 612 609 { 610 + (void)dc; 613 611 struct transform *xfm = pipe_ctx->plane_res.xfm; 614 612 615 613 xfm->funcs->opp_power_on_regamma_lut(xfm, true); ··· 1543 1539 struct dc_state *context, 1544 1540 struct dc *dc) 1545 1541 { 1542 + (void)context; 1546 1543 struct dc_stream_state *stream = pipe_ctx->stream; 1547 1544 struct pipe_ctx *pipe_ctx_old = &dc->current_state->res_ctx. 1548 1545 pipe_ctx[pipe_ctx->pipe_idx]; ··· 1573 1568 return DC_ERROR_UNEXPECTED; 1574 1569 } 1575 1570 1576 - if (dc_is_hdmi_tmds_signal(stream->signal)) { 1571 + if (dc_is_tmds_signal(stream->signal)) { 1577 1572 stream->link->phy_state.symclk_ref_cnts.otg = 1; 1578 1573 if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF) 1579 1574 stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF; ··· 1991 1986 struct pipe_ctx *pipe_ctx = NULL; 1992 1987 struct dce_hwseq *hws = dc->hwseq; 1993 1988 int edp_with_sink_num; 1994 - int edp_num; 1989 + unsigned int edp_num; 1995 1990 int edp_stream_num; 1996 1991 int i; 1997 1992 bool can_apply_edp_fast_boot = false; ··· 2423 2418 BREAK_TO_DEBUGGER(); 2424 2419 } 2425 2420 pipe_ctx_old->stream_res.tg->funcs->disable_crtc(pipe_ctx_old->stream_res.tg); 2426 - if (dc_is_hdmi_tmds_signal(pipe_ctx_old->stream->signal)) 2421 + if (dc_is_tmds_signal(pipe_ctx_old->stream->signal)) 2427 2422 pipe_ctx_old->stream->link->phy_state.symclk_ref_cnts.otg = 0; 2428 2423 pipe_ctx_old->plane_res.mi->funcs->free_mem_input( 2429 2424 pipe_ctx_old->plane_res.mi, dc->current_state->stream_count); ··· 2733 2728 static void update_plane_addr(const struct dc *dc, 2734 2729 struct pipe_ctx *pipe_ctx) 2735 2730 { 2731 + (void)dc; 2736 2732 struct dc_plane_state *plane_state = pipe_ctx->plane_state; 2737 2733 2738 2734 if (plane_state == NULL) ··· 2820 2814 int group_size, 2821 2815 struct pipe_ctx *grouped_pipes[]) 2822 2816 { 2817 + (void)state; 2818 + (void)group_index; 2823 2819 struct dcp_gsl_params gsl_params = { 0 }; 2824 2820 int i; 2825 2821 DC_LOGGER_INIT(dc->ctx); ··· 2897 2889 2898 2890 static void dce110_init_pipes(struct dc *dc, struct dc_state *context) 2899 2891 { 2892 + (void)context; 2893 + (void)dc; 2900 2894 // Do nothing 2901 2895 } 2902 2896 ··· 3164 3154 struct dc *dc, 3165 3155 struct dc_state *context) 3166 3156 { 3157 + (void)dc; 3158 + (void)context; 3167 3159 } 3168 3160 3169 3161 static void 
dce110_power_down_fe(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx) 3170 3162 { 3163 + (void)state; 3171 3164 struct dce_hwseq *hws = dc->hwseq; 3172 3165 int fe_idx = pipe_ctx->plane_res.mi ? 3173 3166 pipe_ctx->plane_res.mi->inst : pipe_ctx->pipe_idx; ··· 3191 3178 struct resource_pool *res_pool, 3192 3179 struct pipe_ctx *pipe_ctx) 3193 3180 { 3181 + (void)dc; 3182 + (void)res_pool; 3183 + (void)pipe_ctx; 3194 3184 /* do nothing*/ 3195 3185 } 3196 3186 ··· 3203 3187 uint16_t *matrix, 3204 3188 int opp_id) 3205 3189 { 3190 + (void)dc; 3191 + (void)colorspace; 3192 + (void)matrix; 3193 + (void)opp_id; 3206 3194 int i; 3207 3195 struct out_csc_color_matrix tbl_entry; 3208 3196 ··· 3351 3331 enum clock_source_id clock_source, 3352 3332 uint32_t pixel_clock) 3353 3333 { 3334 + (void)link_res; 3354 3335 link->link_enc->funcs->enable_lvds_output( 3355 3336 link->link_enc, 3356 3337 clock_source, ··· 3366 3345 enum dc_color_depth color_depth, 3367 3346 uint32_t pixel_clock) 3368 3347 { 3348 + (void)link_res; 3369 3349 link->link_enc->funcs->enable_tmds_output( 3370 3350 link->link_enc, 3371 3351 clock_source,
+4
drivers/gpu/drm/amd/display/dc/hwss/dce120/dce120_hwseq.c
··· 154 154 struct dc_bios *dcb, 155 155 enum pipe_gating_control power_gating) 156 156 { 157 + (void)dc; 158 + (void)controller_id; 159 + (void)dcb; 160 + (void)power_gating; 157 161 /* disable for bringup */ 158 162 #if 0 159 163 enum bp_result bp_result = BP_RESULT_OK;
+22 -1
drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
··· 86 86 struct dc_log_buffer_ctx *log_ctx, 87 87 uint32_t ref_cycle) 88 88 { 89 + (void)log_ctx; 89 90 const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000; 90 91 static const unsigned int frac = 1000; 91 92 uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz; ··· 253 252 static void log_mpc_crc(struct dc *dc, 254 253 struct dc_log_buffer_ctx *log_ctx) 255 254 { 255 + (void)log_ctx; 256 256 struct dc_context *dc_ctx = dc->ctx; 257 257 struct dce_hwseq *hws = dc->hwseq; 258 258 ··· 452 450 static void dcn10_log_color_state(struct dc *dc, 453 451 struct dc_log_buffer_ctx *log_ctx) 454 452 { 453 + (void)log_ctx; 455 454 struct dc_context *dc_ctx = dc->ctx; 456 455 struct resource_pool *pool = dc->res_pool; 457 456 bool is_gamut_remap_available = false; ··· 816 813 817 814 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx) 818 815 { 816 + (void)dc; 819 817 struct hubp *hubp = pipe_ctx->plane_res.hubp; 820 818 struct timing_generator *tg = pipe_ctx->stream_res.tg; 821 819 ··· 1185 1181 struct dc_state *context, 1186 1182 struct dc *dc) 1187 1183 { 1184 + (void)context; 1188 1185 struct dc_stream_state *stream = pipe_ctx->stream; 1189 1186 enum dc_color_space color_space; 1190 1187 struct tg_color black_color = {0}; ··· 1289 1284 struct pipe_ctx *pipe_ctx, 1290 1285 struct dc_state *context) 1291 1286 { 1287 + (void)context; 1292 1288 int i; 1293 1289 struct dc_link *link; 1294 1290 DC_LOGGER_INIT(dc->ctx); ··· 1557 1551 1558 1552 void dcn10_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx) 1559 1553 { 1554 + (void)state; 1560 1555 struct dce_hwseq *hws = dc->hwseq; 1561 1556 DC_LOGGER_INIT(dc->ctx); 1562 1557 ··· 1911 1904 { 1912 1905 struct dc_link *edp_links[MAX_NUM_EDP]; 1913 1906 struct dc_link *edp_link = NULL; 1914 - int edp_num; 1907 + unsigned int edp_num; 1915 1908 int i = 0; 1916 1909 1917 1910 dc_get_edp_links(dc, edp_links, &edp_num); ··· 2011 2004 2012 2005 void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx) 2013 2006 { 2007 + (void)dc; 2014 2008 bool addr_patched = false; 2015 2009 PHYSICAL_ADDRESS_LOC addr; 2016 2010 struct dc_plane_state *plane_state = pipe_ctx->plane_state; ··· 2038 2030 bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, 2039 2031 const struct dc_plane_state *plane_state) 2040 2032 { 2033 + (void)dc; 2041 2034 struct dpp *dpp_base = pipe_ctx->plane_res.dpp; 2042 2035 const struct dc_transfer_func *tf = NULL; 2043 2036 bool result = true; ··· 2481 2472 int group_size, 2482 2473 struct pipe_ctx *grouped_pipes[]) 2483 2474 { 2475 + (void)group_index; 2484 2476 struct output_pixel_processor *opp; 2485 2477 struct timing_generator *tg; 2486 2478 int i, width = 0, height = 0, master; ··· 2547 2537 int group_size, 2548 2538 struct pipe_ctx *grouped_pipes[]) 2549 2539 { 2540 + (void)group_index; 2550 2541 struct output_pixel_processor *opp; 2551 2542 struct timing_generator *tg; 2552 2543 int i, width = 0, height = 0; ··· 2652 2641 struct vm_system_aperture_param *apt, 2653 2642 struct dce_hwseq *hws) 2654 2643 { 2644 + (void)hubp1; 2655 2645 PHYSICAL_ADDRESS_LOC physical_page_number; 2656 2646 uint32_t logical_addr_low; 2657 2647 uint32_t logical_addr_high; ··· 2678 2666 struct vm_context0_param *vm0, 2679 2667 struct dce_hwseq *hws) 2680 2668 { 2669 + (void)hubp1; 2681 2670 PHYSICAL_ADDRESS_LOC fb_base; 2682 2671 PHYSICAL_ADDRESS_LOC fb_offset; 2683 2672 uint32_t fb_base_value; ··· 2737 2724 struct pipe_ctx *pipe_ctx, 2738 2725 
struct dc_state *context) 2739 2726 { 2727 + (void)context; 2740 2728 struct dce_hwseq *hws = dc->hwseq; 2741 2729 2742 2730 if (dc->debug.sanity_checks) { ··· 2835 2821 uint16_t *matrix, 2836 2822 int opp_id) 2837 2823 { 2824 + (void)dc; 2825 + (void)opp_id; 2838 2826 if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) { 2839 2827 if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) { 2840 2828 ··· 3602 3586 struct dc_bios *dcb, 3603 3587 enum pipe_gating_control power_gating) 3604 3588 { 3589 + (void)dc; 3590 + (void)controller_id; 3591 + (void)dcb; 3592 + (void)power_gating; 3605 3593 return true; 3606 3594 } 3607 3595 ··· 4072 4052 uint32_t clk_khz, 4073 4053 uint32_t stepping) 4074 4054 { 4055 + (void)stepping; 4075 4056 struct dc_state *context = dc->current_state; 4076 4057 struct dc_clock_config clock_cfg = {0}; 4077 4058 struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
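Several hunks in this series flip edp_num from int to unsigned int; presumably dc_get_edp_links() takes an unsigned int *, and passing an int * where an unsigned int * is expected is an incompatible-pointer-types warning in C, even though the two have the same representation for the values in play. In isolation:

    #include <stdio.h>

    static void get_links(unsigned int *count)
    {
        *count = 2;
    }

    int main(void)
    {
        /* int n; get_links(&n); would warn: int * and unsigned int *
         * are not compatible pointer types. Matching the callee's
         * signedness removes the warning. */
        unsigned int n;

        get_links(&n);
        printf("%u links\n", n);
        return 0;
    }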
+11 -2
drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
··· 75 75 void dcn20_log_color_state(struct dc *dc, 76 76 struct dc_log_buffer_ctx *log_ctx) 77 77 { 78 + (void)log_ctx; 78 79 struct dc_context *dc_ctx = dc->ctx; 79 80 struct resource_pool *pool = dc->res_pool; 80 81 bool is_gamut_remap_available = false; ··· 380 379 struct pipe_ctx *pipe_ctx, 381 380 bool enable_triple_buffer) 382 381 { 382 + (void)dc; 383 383 if (pipe_ctx->plane_res.hubp && pipe_ctx->plane_res.hubp->funcs) { 384 384 pipe_ctx->plane_res.hubp->funcs->hubp_enable_tripleBuffer( 385 385 pipe_ctx->plane_res.hubp, ··· 895 893 dccg->funcs->set_dtbclk_dto(dccg, &dto_params); 896 894 } 897 895 898 - if (dc_is_hdmi_tmds_signal(stream->signal)) { 896 + if (dc_is_tmds_signal(stream->signal)) { 899 897 stream->link->phy_state.symclk_ref_cnts.otg = 1; 900 898 if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF) 901 899 stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF; ··· 1177 1175 1178 1176 void dcn20_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) 1179 1177 { 1178 + (void)context; 1179 + (void)dc; 1180 1180 struct pipe_ctx *odm_pipe; 1181 1181 int opp_cnt = 1; 1182 1182 int opp_inst[MAX_PIPES] = { pipe_ctx->stream_res.opp->inst }; ··· 1301 1297 void dcn20_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx, 1302 1298 struct dc_state *context) 1303 1299 { 1300 + (void)context; 1304 1301 //if (dc->debug.sanity_checks) { 1305 1302 // dcn10_verify_allow_pstate_change_high(dc); 1306 1303 //} ··· 2657 2652 struct dc_virtual_addr_space_config *va_config, 2658 2653 int vmid) 2659 2654 { 2655 + (void)hws; 2660 2656 struct dcn_hubbub_virt_addr_config config; 2661 2657 2662 2658 if (vmid == 0) { ··· 2676 2670 2677 2671 int dcn20_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config) 2678 2672 { 2673 + (void)hws; 2679 2674 struct dcn_hubbub_phys_addr_config config; 2680 2675 2681 2676 config.system_aperture.fb_top = pa_config->system_aperture.fb_top; ··· 2806 2799 struct pipe_ctx *pipe_ctx, 2807 2800 struct dc_state *context) 2808 2801 { 2802 + (void)context; 2809 2803 struct dc_link *link = pipe_ctx->stream->link; 2810 2804 const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); 2811 2805 struct dccg *dccg = dc->res_pool->dccg; ··· 2864 2856 * the case where the same symclk is shared across multiple otg 2865 2857 * instances 2866 2858 */ 2867 - if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) 2859 + if (dc_is_tmds_signal(pipe_ctx->stream->signal)) 2868 2860 link->phy_state.symclk_ref_cnts.otg = 0; 2869 2861 if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) { 2870 2862 link_hwss->disable_link_output(link, ··· 3253 3245 const struct tg_color *solid_color, 3254 3246 int width, int height, int offset) 3255 3247 { 3248 + (void)dc; 3256 3249 pipe_ctx->stream_res.opp->funcs->opp_set_disp_pattern_generator(pipe_ctx->stream_res.opp, test_pattern, 3257 3250 color_space, color_depth, solid_color, width, height, offset); 3258 3251 }
+4 -1
drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
··· 75 75 void dcn30_log_color_state(struct dc *dc, 76 76 struct dc_log_buffer_ctx *log_ctx) 77 77 { 78 + (void)log_ctx; 78 79 struct dc_context *dc_ctx = dc->ctx; 79 80 struct resource_pool *pool = dc->res_pool; 80 81 bool is_gamut_remap_available = false; ··· 646 645 struct dc_bios *dcb = dc->ctx->dc_bios; 647 646 struct resource_pool *res_pool = dc->res_pool; 648 647 int i; 649 - int edp_num; 648 + unsigned int edp_num; 650 649 uint32_t backlight = MAX_BACKLIGHT_LEVEL; 651 650 uint32_t user_level = MAX_BACKLIGHT_LEVEL; 652 651 ··· 1184 1183 const struct tg_color *solid_color, 1185 1184 int width, int height, int offset) 1186 1185 { 1186 + (void)dc; 1187 1187 pipe_ctx->stream_res.opp->funcs->opp_set_disp_pattern_generator(pipe_ctx->stream_res.opp, test_pattern, 1188 1188 color_space, color_depth, solid_color, width, height, offset); 1189 1189 } ··· 1239 1237 struct timing_generator *tg, 1240 1238 struct dc_underflow_debug_data *out_data) 1241 1239 { 1240 + (void)tg; 1242 1241 struct hubbub *hubbub = dc->res_pool->hubbub; 1243 1242 1244 1243 if (hubbub) {
+11
drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_hwseq.c
··· 45 45 46 46 void dcn303_dpp_pg_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool power_on) 47 47 { 48 + (void)dpp_inst; 49 + (void)hws; 50 + (void)power_on; 48 51 /*DCN303 removes PG registers*/ 49 52 } 50 53 51 54 void dcn303_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on) 52 55 { 56 + (void)hubp_inst; 57 + (void)hws; 58 + (void)power_on; 53 59 /*DCN303 removes PG registers*/ 54 60 } 55 61 56 62 void dcn303_dsc_pg_control(struct dce_hwseq *hws, unsigned int dsc_inst, bool power_on) 57 63 { 64 + (void)dsc_inst; 65 + (void)hws; 66 + (void)power_on; 58 67 /*DCN303 removes PG registers*/ 59 68 } 60 69 61 70 void dcn303_enable_power_gating_plane(struct dce_hwseq *hws, bool enable) 62 71 { 72 + (void)enable; 73 + (void)hws; 63 74 /*DCN303 removes PG registers*/ 64 75 }
+3 -1
drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
··· 484 484 485 485 int dcn31_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config) 486 486 { 487 + (void)hws; 487 488 struct dcn_hubbub_phys_addr_config config = {0}; 488 489 489 490 config.system_aperture.fb_top = pa_config->system_aperture.fb_top; ··· 512 511 struct pipe_ctx *pipe_ctx, 513 512 struct dc_state *context) 514 513 { 514 + (void)context; 515 515 struct dc_link *link; 516 516 517 517 if (pipe_ctx->stream_res.stream_enc == NULL) { ··· 550 548 * the case where the same symclk is shared across multiple otg 551 549 * instances 552 550 */ 553 - if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) 551 + if (dc_is_tmds_signal(pipe_ctx->stream->signal)) 554 552 link->phy_state.symclk_ref_cnts.otg = 0; 555 553 556 554 if (pipe_ctx->top_pipe == NULL) {
+1
drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
··· 172 172 173 173 void dcn314_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) 174 174 { 175 + (void)context; 175 176 struct pipe_ctx *odm_pipe; 176 177 int opp_cnt = 0; 177 178 int opp_inst[MAX_PIPES] = {0};
+3 -1
drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
··· 570 570 struct pipe_ctx *pipe_ctx, 571 571 const struct dc_stream_state *stream) 572 572 { 573 + (void)dc; 573 574 int mpcc_id = pipe_ctx->plane_res.hubp->inst; 574 575 struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; 575 576 const struct pwl_params *params = NULL; ··· 794 793 struct dc_bios *dcb = dc->ctx->dc_bios; 795 794 struct resource_pool *res_pool = dc->res_pool; 796 795 int i; 797 - int edp_num; 796 + unsigned int edp_num; 798 797 uint32_t backlight = MAX_BACKLIGHT_LEVEL; 799 798 uint32_t user_level = MAX_BACKLIGHT_LEVEL; 800 799 ··· 1144 1143 1145 1144 void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) 1146 1145 { 1146 + (void)context; 1147 1147 struct pipe_ctx *odm_pipe; 1148 1148 int opp_cnt = 0; 1149 1149 int opp_inst[MAX_PIPES] = {0};
+4 -2
drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
··· 428 428 429 429 void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx) 430 430 { 431 + (void)context; 431 432 struct pipe_ctx *odm_pipe; 432 433 int opp_cnt = 0; 433 434 int opp_inst[MAX_PIPES] = {0}; ··· 521 520 { 522 521 struct dc_link *edp_links[MAX_NUM_EDP]; 523 522 struct dc_link *edp_link = NULL; 524 - int edp_num; 523 + unsigned int edp_num; 525 524 int i = 0; 526 525 527 526 dc_get_edp_links(dc, edp_links, &edp_num); ··· 817 816 void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx, 818 817 struct dc_state *context) 819 818 { 819 + (void)context; 820 820 struct dpp *dpp = pipe_ctx->plane_res.dpp; 821 821 822 822 /* enable DCFCLK current DCHUB */ ··· 923 921 bool hpo_frl_stream_enc_acquired = false; 924 922 bool hpo_dp_stream_enc_acquired = false; 925 923 int i = 0, j = 0; 926 - int edp_num = 0; 924 + unsigned int edp_num = 0; 927 925 struct dc_link *edp_links[MAX_NUM_EDP] = { NULL }; 928 926 929 927 memset(update_state, 0, sizeof(struct pg_block_update));
+62 -70
drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
··· 140 140 struct dc_bios *dcb = dc->ctx->dc_bios; 141 141 struct resource_pool *res_pool = dc->res_pool; 142 142 int i; 143 - int edp_num; 143 + unsigned int edp_num; 144 144 uint32_t backlight = MAX_BACKLIGHT_LEVEL; 145 145 uint32_t user_level = MAX_BACKLIGHT_LEVEL; 146 + bool dchub_ref_freq_changed; 146 147 int current_dchub_ref_freq = 0; 147 148 148 149 if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks) { ··· 358 357 dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; 359 358 dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver > 0; 360 359 dc->caps.dmub_caps.fams_ver = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver; 360 + 361 + /* sw and fw FAMS versions must match for support */ 361 362 dc->debug.fams2_config.bits.enable &= 362 - dc->caps.dmub_caps.fams_ver == dc->debug.fams_version.ver; // sw & fw fams versions must match for support 363 - if ((!dc->debug.fams2_config.bits.enable && dc->res_pool->funcs->update_bw_bounding_box) 364 - || res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 != current_dchub_ref_freq) { 363 + dc->caps.dmub_caps.fams_ver == dc->debug.fams_version.ver; 364 + dchub_ref_freq_changed = 365 + res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 != current_dchub_ref_freq; 366 + if ((!dc->debug.fams2_config.bits.enable || dchub_ref_freq_changed) && 367 + dc->res_pool->funcs->update_bw_bounding_box && 368 + dc->clk_mgr && dc->clk_mgr->bw_params) { 365 369 /* update bounding box if FAMS2 disabled, or if dchub clk has changed */ 366 370 if (dc->clk_mgr) 367 - dc->res_pool->funcs->update_bw_bounding_box(dc, 368 - dc->clk_mgr->bw_params); 371 + dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params); 369 372 } 370 373 } 371 374 } 372 375 373 - void dcn401_trigger_3dlut_dma_load(struct pipe_ctx *pipe_ctx) 376 + void dcn401_trigger_3dlut_dma_load(struct dc *dc, struct pipe_ctx *pipe_ctx) 374 377 { 375 - const struct pipe_ctx *primary_dpp_pipe_ctx = resource_get_primary_dpp_pipe(pipe_ctx); 376 - struct hubp *primary_hubp = primary_dpp_pipe_ctx ? 377 - primary_dpp_pipe_ctx->plane_res.hubp : NULL; 378 + (void)dc; 379 + struct hubp *hubp = pipe_ctx->plane_res.hubp; 378 380 379 - if (primary_hubp && primary_hubp->funcs->hubp_enable_3dlut_fl) { 380 - primary_hubp->funcs->hubp_enable_3dlut_fl(primary_hubp, true); 381 + if (hubp->funcs->hubp_enable_3dlut_fl) { 382 + hubp->funcs->hubp_enable_3dlut_fl(hubp, true); 381 383 } 382 384 } 383 385 ··· 388 384 const struct dc_plane_state *plane_state) 389 385 { 390 386 struct dc *dc = pipe_ctx->plane_res.hubp->ctx->dc; 391 - const struct pipe_ctx *primary_dpp_pipe_ctx = resource_get_primary_dpp_pipe(pipe_ctx); 392 387 struct dpp *dpp_base = pipe_ctx->plane_res.dpp; 393 388 struct hubp *hubp = pipe_ctx->plane_res.hubp; 394 - struct hubp *primary_hubp = primary_dpp_pipe_ctx ? 
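The reworked VREADY workaround replaces the single primary-pipe special case with a walk over every DPP pipe under the OTG master, collecting those with both lut3d_enable and lut3d_dma_enable set before toggling the vupdate keepout. A simplified model of that two-level walk; the fields below are reduced from the driver's pipe_ctx topology (next_odm_pipe chains ODM neighbours, bottom_pipe chains MPC-blended planes):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define MAX_PIPES 6

    struct pipe {
        int inst;
        bool lut3d_dma_enable;
        struct pipe *next_odm;
        struct pipe *bottom;
    };

    /* Collect every pipe under the OTG master that needs the 3DLUT
     * re-trigger, the same two-level walk the reworked WA performs. */
    static int collect_wa_pipes(struct pipe *otg_master, struct pipe **out)
    {
        struct pipe *odm, *mpc;
        int n = 0;

        for (odm = otg_master; odm; odm = odm->next_odm)
            for (mpc = odm; mpc; mpc = mpc->bottom)
                if (mpc->lut3d_dma_enable)
                    out[n++] = mpc;
        return n;
    }

    int main(void)
    {
        struct pipe p2 = { .inst = 2, .lut3d_dma_enable = true };
        struct pipe p1 = { .inst = 1, .bottom = &p2 };
        struct pipe p0 = { .inst = 0, .lut3d_dma_enable = true, .next_odm = &p1 };
        struct pipe *wa[MAX_PIPES];
        int i, n = collect_wa_pipes(&p0, wa);

        for (i = 0; i < n; i++)
            printf("re-trigger 3DLUT on pipe %d\n", wa[i]->inst);
        return 0;
    }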
395 - primary_dpp_pipe_ctx->plane_res.hubp : NULL; 396 389 const struct dc_plane_cm *cm = &plane_state->cm; 397 390 int mpcc_id = hubp->inst; 398 391 struct mpc *mpc = dc->res_pool->mpc; ··· 487 486 mpc->funcs->program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, 12, mpcc_id); 488 487 489 488 if (mpc->funcs->update_3dlut_fast_load_select) 490 - mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, primary_hubp->inst); 489 + mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst); 491 490 492 491 /* HUBP */ 493 - if (primary_hubp->inst == hubp->inst) { 494 - /* only program if this is the primary dpp pipe for the given plane */ 495 - if (hubp->funcs->hubp_program_3dlut_fl_config) 496 - hubp->funcs->hubp_program_3dlut_fl_config(hubp, &cm->lut3d_dma); 492 + if (hubp->funcs->hubp_program_3dlut_fl_config) 493 + hubp->funcs->hubp_program_3dlut_fl_config(hubp, &cm->lut3d_dma); 497 494 498 - if (hubp->funcs->hubp_program_3dlut_fl_crossbar) 499 - hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp, cm->lut3d_dma.format); 495 + if (hubp->funcs->hubp_program_3dlut_fl_crossbar) 496 + hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp, cm->lut3d_dma.format); 500 497 501 - if (hubp->funcs->hubp_program_3dlut_fl_addr) 502 - hubp->funcs->hubp_program_3dlut_fl_addr(hubp, &cm->lut3d_dma.addr); 498 + if (hubp->funcs->hubp_program_3dlut_fl_addr) 499 + hubp->funcs->hubp_program_3dlut_fl_addr(hubp, &cm->lut3d_dma.addr); 503 500 504 - if (hubp->funcs->hubp_enable_3dlut_fl) { 505 - hubp->funcs->hubp_enable_3dlut_fl(hubp, true); 506 - } else { 507 - /* GPU memory only supports fast load path */ 508 - BREAK_TO_DEBUGGER(); 509 - lut_enable = false; 510 - result = false; 511 - } 501 + if (hubp->funcs->hubp_enable_3dlut_fl) { 502 + hubp->funcs->hubp_enable_3dlut_fl(hubp, true); 512 503 } else { 513 - /* re-trigger priamry HUBP to load 3DLUT */ 514 - if (primary_hubp->funcs->hubp_enable_3dlut_fl) { 515 - primary_hubp->funcs->hubp_enable_3dlut_fl(primary_hubp, true); 516 - } 517 - 518 - /* clear FL setup on this pipe's HUBP */ 519 - memset(&lut3d_dma, 0, sizeof(lut3d_dma)); 520 - if (hubp->funcs->hubp_program_3dlut_fl_config) 521 - hubp->funcs->hubp_program_3dlut_fl_config(hubp, &lut3d_dma); 522 - 523 - if (hubp->funcs->hubp_enable_3dlut_fl) 524 - hubp->funcs->hubp_enable_3dlut_fl(hubp, false); 504 + /* GPU memory only supports fast load path */ 505 + BREAK_TO_DEBUGGER(); 506 + lut_enable = false; 507 + result = false; 525 508 } 526 509 } else { 527 510 /* Legacy (Host) Load Mode */ ··· 557 572 struct pipe_ctx *pipe_ctx, 558 573 const struct dc_stream_state *stream) 559 574 { 575 + (void)dc; 560 576 int mpcc_id = pipe_ctx->plane_res.hubp->inst; 561 577 struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; 562 578 const struct pwl_params *params = NULL; ··· 619 633 struct drr_params *params, 620 634 unsigned int *event_triggers) 621 635 { 636 + (void)dc; 622 637 struct dc_stream_state *stream = pipe_ctx->stream; 623 638 int i; 624 639 ··· 1397 1410 struct dc_state *context, 1398 1411 bool lock) 1399 1412 { 1413 + (void)context; 1400 1414 /* use always for now */ 1401 1415 union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 }; 1402 1416 ··· 1818 1830 * This is meant to work around a known HW issue where VREADY will cancel the pending 3DLUT_ENABLE signal regardless 1819 1831 * of whether OTG lock is currently being held or not. 1820 1832 */ 1821 - const struct pipe_ctx *otg_master_pipe_ctx = resource_get_otg_master(pipe_ctx); 1822 - struct timing_generator *tg = otg_master_pipe_ctx ? 
1823 - otg_master_pipe_ctx->stream_res.tg : NULL; 1824 - const struct pipe_ctx *primary_dpp_pipe_ctx = resource_is_pipe_type(pipe_ctx, DPP_PIPE) ? 1825 - resource_get_primary_dpp_pipe(pipe_ctx) : pipe_ctx; 1826 - struct hubp *primary_hubp = primary_dpp_pipe_ctx ? 1827 - primary_dpp_pipe_ctx->plane_res.hubp : NULL; 1833 + struct pipe_ctx *wa_pipes[MAX_PIPES] = { NULL }; 1834 + struct pipe_ctx *odm_pipe, *mpc_pipe; 1835 + int i, wa_pipe_ct = 0; 1828 1836 1829 - if (!otg_master_pipe_ctx && !tg) { 1830 - return; 1837 + for (odm_pipe = pipe_ctx; odm_pipe != NULL; odm_pipe = odm_pipe->next_odm_pipe) { 1838 + for (mpc_pipe = odm_pipe; mpc_pipe != NULL; mpc_pipe = mpc_pipe->bottom_pipe) { 1839 + if (mpc_pipe->plane_state && 1840 + mpc_pipe->plane_state->cm.flags.bits.lut3d_enable && 1841 + mpc_pipe->plane_state->cm.flags.bits.lut3d_dma_enable) { 1842 + wa_pipes[wa_pipe_ct++] = mpc_pipe; 1843 + } 1844 + } 1831 1845 } 1832 1846 1833 - if (primary_dpp_pipe_ctx && 1834 - primary_dpp_pipe_ctx->plane_state && 1835 - primary_dpp_pipe_ctx->plane_state->cm.flags.bits.lut3d_enable && 1836 - primary_dpp_pipe_ctx->plane_state->cm.flags.bits.lut3d_dma_enable) { 1837 - if (tg->funcs->set_vupdate_keepout) 1838 - tg->funcs->set_vupdate_keepout(tg, true); 1847 + if (wa_pipe_ct > 0) { 1848 + if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout) 1849 + pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, true); 1839 1850 1840 - if (primary_hubp->funcs->hubp_enable_3dlut_fl) { 1841 - primary_hubp->funcs->hubp_enable_3dlut_fl(primary_hubp, true); 1851 + for (i = 0; i < wa_pipe_ct; ++i) { 1852 + if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl) 1853 + wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true); 1842 1854 } 1843 1855 1844 - tg->funcs->unlock(tg); 1845 - if (tg->funcs->wait_update_lock_status) 1846 - tg->funcs->wait_update_lock_status(tg, false); 1856 + pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg); 1857 + if (pipe_ctx->stream_res.tg->funcs->wait_update_lock_status) 1858 + pipe_ctx->stream_res.tg->funcs->wait_update_lock_status(pipe_ctx->stream_res.tg, false); 1847 1859 1848 - if (primary_hubp->funcs->hubp_enable_3dlut_fl) { 1849 - primary_hubp->funcs->hubp_enable_3dlut_fl(primary_hubp, true); 1860 + for (i = 0; i < wa_pipe_ct; ++i) { 1861 + if (wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl) 1862 + wa_pipes[i]->plane_res.hubp->funcs->hubp_enable_3dlut_fl(wa_pipes[i]->plane_res.hubp, true); 1850 1863 } 1851 1864 1852 - if (tg->funcs->set_vupdate_keepout) 1853 - tg->funcs->set_vupdate_keepout(tg, false); 1865 + if (pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout) 1866 + pipe_ctx->stream_res.tg->funcs->set_vupdate_keepout(pipe_ctx->stream_res.tg, false); 1854 1867 } else { 1855 - tg->funcs->unlock(tg); 1868 + pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg); 1856 1869 } 1857 1870 } 1858 1871 ··· 1872 1883 struct pipe_ctx *pipe_ctx, 1873 1884 struct dc_state *context) 1874 1885 { 1886 + (void)context; 1875 1887 struct dc_link *link = pipe_ctx->stream->link; 1876 1888 const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); 1877 1889 ··· 1928 1938 * the case where the same symclk is shared across multiple otg 1929 1939 * instances 1930 1940 */ 1931 - if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) 1941 + if (dc_is_tmds_signal(pipe_ctx->stream->signal)) 1932 1942 link->phy_state.symclk_ref_cnts.otg = 0; 1933 1943 if (link->phy_state.symclk_state == SYMCLK_ON_TX_OFF) 
{ 1934 1944 link_hwss->disable_link_output(link, ··· 3248 3258 struct dc_state *context, 3249 3259 struct block_sequence_state *seq_state) 3250 3260 { 3261 + (void)context; 3251 3262 struct dwbc *dwb; 3252 3263 struct mcif_wb *mcif_wb; 3253 3264 ··· 3454 3463 struct dc_state *context, 3455 3464 struct block_sequence_state *seq_state) 3456 3465 { 3466 + (void)context; 3457 3467 struct dce_hwseq *hws = dc->hwseq; 3458 3468 uint32_t org_ip_request_cntl = 0; 3459 3469
+2 -1
drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
··· 41 41 bool dcn401_set_output_transfer_func(struct dc *dc, 42 42 struct pipe_ctx *pipe_ctx, 43 43 const struct dc_stream_state *stream); 44 - void dcn401_trigger_3dlut_dma_load(struct pipe_ctx *pipe_ctx); 44 + void dcn401_trigger_3dlut_dma_load(struct dc *dc, 45 + struct pipe_ctx *pipe_ctx); 45 46 void dcn401_calculate_dccg_tmds_div_value(struct pipe_ctx *pipe_ctx, 46 47 unsigned int *tmds_div); 47 48 enum dc_status dcn401_enable_stream_timing(
+4 -2
drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_hwseq.c
··· 66 66 struct dc_bios *dcb = dc->ctx->dc_bios; 67 67 struct resource_pool *res_pool = dc->res_pool; 68 68 int i; 69 - int edp_num; 69 + unsigned int edp_num; 70 70 uint32_t backlight = MAX_BACKLIGHT_LEVEL; 71 71 uint32_t user_level = MAX_BACKLIGHT_LEVEL; 72 72 bool dchub_ref_freq_changed; ··· 386 386 struct pipe_ctx *pipe_ctx, 387 387 const struct dc_plane_state *plane_state) 388 388 { 389 + (void)dc; 389 390 struct dpp *dpp = pipe_ctx->plane_res.dpp; 390 391 391 392 if (dpp && dpp->funcs->dpp_cm_hist_control) ··· 1001 1000 } 1002 1001 void dcn42_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc) 1003 1002 { 1003 + (void)dc; 1004 1004 struct crtc_stereo_flags flags = { 0 }; 1005 1005 struct dc_stream_state *stream = pipe_ctx->stream; 1006 1006 ··· 1065 1063 { 1066 1064 struct dc_link *edp_links[MAX_NUM_EDP]; 1067 1065 struct dc_link *edp_link = NULL; 1068 - int edp_num; 1066 + unsigned int edp_num; 1069 1067 int i = 0; 1070 1068 1071 1069 dc_get_edp_links(dc, edp_links, &edp_num);
+1 -1
drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
··· 1120 1120 void (*program_output_csc)(struct dc *dc, struct pipe_ctx *pipe_ctx, 1121 1121 enum dc_color_space colorspace, 1122 1122 uint16_t *matrix, int opp_id); 1123 - void (*trigger_3dlut_dma_load)(struct pipe_ctx *pipe_ctx); 1123 + void (*trigger_3dlut_dma_load)(struct dc *dc, struct pipe_ctx *pipe_ctx); 1124 1124 1125 1125 /* VM Related */ 1126 1126 int (*init_sys_ctx)(struct dce_hwseq *hws,
+7 -5
drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
··· 92 92 .enable_mask = DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK,\ 93 93 .enable_value = {\ 94 94 DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK,\ 95 - ~DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK\ 95 + (uint32_t)~DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK\ 96 96 },\ 97 97 .ack_reg = mmHPD ## reg_num ## _DC_HPD_INT_CONTROL,\ 98 98 .ack_mask = DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK,\ ··· 107 107 .enable_mask = DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK,\ 108 108 .enable_value = {\ 109 109 DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK,\ 110 - ~DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK },\ 110 + (uint32_t)~DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK },\ 111 111 .ack_reg = mmHPD ## reg_num ## _DC_HPD_INT_CONTROL,\ 112 112 .ack_mask = DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK,\ 113 113 .ack_value = DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK,\ ··· 121 121 GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\ 122 122 .enable_value = {\ 123 123 GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\ 124 - ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK},\ 124 + (uint32_t)~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK},\ 125 125 .ack_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_STATUS,\ 126 126 .ack_mask = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\ 127 127 .ack_value = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\ ··· 136 136 CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\ 137 137 .enable_value = {\ 138 138 CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\ 139 - ~CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK},\ 139 + (uint32_t)~CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK},\ 140 140 .ack_reg = mmCRTC ## reg_num ## _CRTC_V_UPDATE_INT_STATUS,\ 141 141 .ack_mask =\ 142 142 CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\ ··· 152 152 CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK,\ 153 153 .enable_value = {\ 154 154 CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK,\ 155 - ~CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK},\ 155 + (uint32_t)~CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK},\ 156 156 .ack_reg = mmCRTC ## reg_num ## _CRTC_VERTICAL_INTERRUPT0_CONTROL,\ 157 157 .ack_mask =\ 158 158 CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_CLEAR_MASK,\ ··· 183 183 const struct irq_source_info *info, 184 184 bool enable) 185 185 { 186 + (void)enable; 186 187 DC_LOG_ERROR("%s: called for non-implemented irq source, src_id=%u, ext_id=%u\n", 187 188 __func__, info->src_id, info->ext_id); 188 189 ··· 329 328 uint32_t src_id, 330 329 uint32_t ext_id) 331 330 { 331 + (void)irq_service; 332 332 switch (src_id) { 333 333 case VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0: 334 334 return DC_IRQ_SOURCE_VBLANK1;
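The (uint32_t)~MASK casts recur through the irq service tables below. The underlying C issue: the *_MASK macros are plain signed integer constants (int or long in these headers), so ~MASK is a negative value, and storing it into a u32 field draws implicit sign-conversion warnings even though the bit pattern is the intended one. A minimal reproduction:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define INT_EN_MASK 0x00010000L /* signed constant, like the register headers */

    int main(void)
    {
        /* ~INT_EN_MASK is a negative signed value (bit pattern
         * ...FFFEFFFF); converting it to uint32_t is well defined but
         * warns under -Wsign-conversion. The explicit cast documents
         * the intent and silences the warning. */
        uint32_t enable[2] = { INT_EN_MASK, (uint32_t)~INT_EN_MASK };

        printf("set: 0x%08" PRIX32 " clear: 0x%08" PRIX32 "\n",
               enable[0], enable[1]);
        return 0;
    }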
+1 -1
drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
··· 79 79 block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ 80 80 .enable_value = {\ 81 81 block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ 82 - ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \ 82 + (uint32_t)~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \ 83 83 },\ 84 84 .ack_reg = SRI(reg2, block, reg_num),\ 85 85 .ack_mask = \
+5 -5
drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
··· 68 68 .enable_mask = DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK,\ 69 69 .enable_value = {\ 70 70 DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK,\ 71 - ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK\ 71 + (uint32_t)~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK\ 72 72 },\ 73 73 .ack_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\ 74 74 .ack_mask = DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK,\ ··· 83 83 .enable_mask = DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK,\ 84 84 .enable_value = {\ 85 85 DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK,\ 86 - ~DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK },\ 86 + (uint32_t)~DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_EN_MASK },\ 87 87 .ack_reg = mmDC_HPD ## reg_num ## _INT_CONTROL,\ 88 88 .ack_mask = DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_ACK_MASK,\ 89 89 .ack_value = DC_HPD1_INT_CONTROL__DC_HPD1_RX_INT_ACK_MASK,\ ··· 98 98 GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\ 99 99 .enable_value = {\ 100 100 GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK,\ 101 - ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK},\ 101 + (uint32_t)~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK},\ 102 102 .ack_reg = mmDCP ## reg_num ## _GRPH_INTERRUPT_STATUS,\ 103 103 .ack_mask = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\ 104 104 .ack_value = GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK,\ ··· 113 113 CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\ 114 114 .enable_value = {\ 115 115 CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK,\ 116 - ~CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK},\ 116 + (uint32_t)~CRTC_INTERRUPT_CONTROL__CRTC_V_UPDATE_INT_MSK_MASK},\ 117 117 .ack_reg = mmCRTC ## reg_num ## _CRTC_V_UPDATE_INT_STATUS,\ 118 118 .ack_mask =\ 119 119 CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\ ··· 129 129 CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK,\ 130 130 .enable_value = {\ 131 131 CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK,\ 132 - ~CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK},\ 132 + (uint32_t)~CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_INT_ENABLE_MASK},\ 133 133 .ack_reg = mmCRTC ## reg_num ## _CRTC_VERTICAL_INTERRUPT0_CONTROL,\ 134 134 .ack_mask =\ 135 135 CRTC_VERTICAL_INTERRUPT0_CONTROL__CRTC_VERTICAL_INTERRUPT0_CLEAR_MASK,\
+4 -1
drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
··· 42 42 uint32_t src_id, 43 43 uint32_t ext_id) 44 44 { 45 + (void)irq_service; 46 + (void)src_id; 47 + (void)ext_id; 45 48 switch (src_id) { 46 49 case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: 47 50 return DC_IRQ_SOURCE_VBLANK1; ··· 179 176 block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ 180 177 .enable_value = {\ 181 178 block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ 182 - ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \ 179 + (uint32_t)~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \ 183 180 },\ 184 181 .ack_reg = SRI(reg2, block, reg_num),\ 185 182 .ack_mask = \
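The (void) casts added at the top of these handlers are the standard idiom for parameters that a fixed callback signature forces you to accept but that the implementation ignores. A small sketch of the pattern (types and mapping are simplified stand-ins):

/* Callback tables fix the signature, so implementations that ignore an
 * argument cast it to void to silence -Wunused-parameter. The cast is a
 * pure no-op at runtime, which is also why casting a parameter that is
 * used afterwards (src_id in the hunk above) is harmless, just redundant. */
#include <stdint.h>
#include <stdio.h>

struct irq_service;	/* opaque, to match the fixed signature */

typedef int (*to_dal_irq_fn)(struct irq_service *svc,
			     uint32_t src_id, uint32_t ext_id);

static int to_dal_irq_stub(struct irq_service *svc,
			   uint32_t src_id, uint32_t ext_id)
{
	(void)svc;	/* unused: no per-service state required */
	(void)ext_id;	/* unused: extension id ignored by this mapping */

	return (int)src_id;	/* hypothetical 1:1 mapping */
}

int main(void)
{
	to_dal_irq_fn fn = to_dal_irq_stub;

	printf("%d\n", fn(NULL, 7, 0));
	return 0;
}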
+4 -1
drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
··· 43 43 uint32_t src_id, 44 44 uint32_t ext_id) 45 45 { 46 + (void)irq_service; 47 + (void)src_id; 48 + (void)ext_id; 46 49 switch (src_id) { 47 50 case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: 48 51 return DC_IRQ_SOURCE_VBLANK1; ··· 182 179 block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ 183 180 .enable_value = {\ 184 181 block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ 185 - ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \ 182 + (uint32_t)~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \ 186 183 },\ 187 184 .ack_reg = SRI(reg2, block, reg_num),\ 188 185 .ack_mask = \
+4 -1
drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
··· 42 42 uint32_t src_id, 43 43 uint32_t ext_id) 44 44 { 45 + (void)irq_service; 46 + (void)src_id; 47 + (void)ext_id; 45 48 switch (src_id) { 46 49 case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: 47 50 return DC_IRQ_SOURCE_VBLANK1; ··· 192 189 block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ 193 190 .enable_value = {\ 194 191 block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ 195 - ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \ 192 + (uint32_t)~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \ 196 193 },\ 197 194 .ack_reg = SRI(reg2, block, reg_num),\ 198 195 .ack_mask = \
+4 -1
drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
··· 50 50 uint32_t src_id, 51 51 uint32_t ext_id) 52 52 { 53 + (void)irq_service; 54 + (void)src_id; 55 + (void)ext_id; 53 56 switch (src_id) { 54 57 case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: 55 58 return DC_IRQ_SOURCE_VBLANK1; ··· 199 196 block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ 200 197 .enable_value = {\ 201 198 block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ 202 - ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \ 199 + (uint32_t)~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \ 203 200 },\ 204 201 .ack_reg = SRI(reg2, block, reg_num),\ 205 202 .ack_mask = \
+5 -2
drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c
··· 37 37 38 38 static enum dc_irq_source to_dal_irq_source_dcn302(struct irq_service *irq_service, uint32_t src_id, uint32_t ext_id) 39 39 { 40 + (void)ext_id; 41 + (void)irq_service; 42 + (void)src_id; 40 43 switch (src_id) { 41 44 case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: 42 45 return DC_IRQ_SOURCE_VBLANK1; ··· 183 180 .enable_mask = block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ 184 181 .enable_value = {\ 185 182 block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ 186 - ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \ 183 + (uint32_t)~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \ 187 184 },\ 188 185 .ack_reg = SRI(reg2, block, reg_num),\ 189 186 .ack_mask = block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\ ··· 202 199 reg1 ## __ ## mask1 ## _MASK,\ 203 200 .enable_value = {\ 204 201 reg1 ## __ ## mask1 ## _MASK,\ 205 - ~reg1 ## __ ## mask1 ## _MASK \ 202 + (uint32_t)~reg1 ## __ ## mask1 ## _MASK \ 206 203 },\ 207 204 .ack_reg = SRI_DMUB(reg2),\ 208 205 .ack_mask = \
+4 -1
drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c
··· 38 38 uint32_t src_id, 39 39 uint32_t ext_id) 40 40 { 41 + (void)irq_service; 42 + (void)src_id; 43 + (void)ext_id; 41 44 switch (src_id) { 42 45 case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: 43 46 return DC_IRQ_SOURCE_VBLANK1; ··· 126 123 .enable_mask = block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ 127 124 .enable_value = {\ 128 125 block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ 129 - ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \ 126 + (uint32_t)~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \ 130 127 },\ 131 128 .ack_reg = SRI(reg2, block, reg_num),\ 132 129 .ack_mask = block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
+5 -2
drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c
··· 40 40 uint32_t src_id, 41 41 uint32_t ext_id) 42 42 { 43 + (void)irq_service; 44 + (void)src_id; 45 + (void)ext_id; 43 46 switch (src_id) { 44 47 case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: 45 48 return DC_IRQ_SOURCE_VBLANK1; ··· 187 184 block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ 188 185 .enable_value = {\ 189 186 block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ 190 - ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \ 187 + (uint32_t)~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \ 191 188 },\ 192 189 .ack_reg = SRI(reg2, block, reg_num),\ 193 190 .ack_mask = \ ··· 201 198 reg1 ## __ ## mask1 ## _MASK,\ 202 199 .enable_value = {\ 203 200 reg1 ## __ ## mask1 ## _MASK,\ 204 - ~reg1 ## __ ## mask1 ## _MASK \ 201 + (uint32_t)~reg1 ## __ ## mask1 ## _MASK \ 205 202 },\ 206 203 .ack_reg = SRI_DMUB(reg2),\ 207 204 .ack_mask = \
+5 -2
drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c
··· 42 42 uint32_t src_id, 43 43 uint32_t ext_id) 44 44 { 45 + (void)irq_service; 46 + (void)src_id; 47 + (void)ext_id; 45 48 switch (src_id) { 46 49 case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: 47 50 return DC_IRQ_SOURCE_VBLANK1; ··· 189 186 block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ 190 187 .enable_value = {\ 191 188 block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ 192 - ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \ 189 + (uint32_t)~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \ 193 190 },\ 194 191 .ack_reg = SRI(reg2, block, reg_num),\ 195 192 .ack_mask = \ ··· 203 200 reg1 ## __ ## mask1 ## _MASK,\ 204 201 .enable_value = {\ 205 202 reg1 ## __ ## mask1 ## _MASK,\ 206 - ~reg1 ## __ ## mask1 ## _MASK \ 203 + (uint32_t)~reg1 ## __ ## mask1 ## _MASK \ 207 204 },\ 208 205 .ack_reg = SRI_DMUB(reg2),\ 209 206 .ack_mask = \
+5 -2
drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c
··· 47 47 uint32_t src_id, 48 48 uint32_t ext_id) 49 49 { 50 + (void)irq_service; 51 + (void)src_id; 52 + (void)ext_id; 50 53 switch (src_id) { 51 54 case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: 52 55 return DC_IRQ_SOURCE_VBLANK1; ··· 194 191 block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ 195 192 .enable_value = {\ 196 193 block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ 197 - ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \ 194 + (uint32_t)~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \ 198 195 },\ 199 196 .ack_reg = SRI(reg2, block, reg_num),\ 200 197 .ack_mask = \ ··· 208 205 reg1 ## __ ## mask1 ## _MASK,\ 209 206 .enable_value = {\ 210 207 reg1 ## __ ## mask1 ## _MASK,\ 211 - ~reg1 ## __ ## mask1 ## _MASK \ 208 + (uint32_t)~reg1 ## __ ## mask1 ## _MASK \ 212 209 },\ 213 210 .ack_reg = SRI_DMUB(reg2),\ 214 211 .ack_mask = \
+5 -2
drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c
··· 41 41 uint32_t src_id, 42 42 uint32_t ext_id) 43 43 { 44 + (void)irq_service; 45 + (void)src_id; 46 + (void)ext_id; 44 47 switch (src_id) { 45 48 case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: 46 49 return DC_IRQ_SOURCE_VBLANK1; ··· 198 195 block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ 199 196 .enable_value = {\ 200 197 block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ 201 - ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \ 198 + (uint32_t)~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \ 202 199 },\ 203 200 .ack_reg = SRI(reg2, block, reg_num),\ 204 201 .ack_mask = \ ··· 212 209 reg1 ## __ ## mask1 ## _MASK,\ 213 210 .enable_value = {\ 214 211 reg1 ## __ ## mask1 ## _MASK,\ 215 - ~reg1 ## __ ## mask1 ## _MASK \ 212 + (uint32_t)~reg1 ## __ ## mask1 ## _MASK \ 216 213 },\ 217 214 .ack_reg = SRI_DMUB(reg2),\ 218 215 .ack_mask = \
+5 -2
drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c
··· 39 39 uint32_t src_id, 40 40 uint32_t ext_id) 41 41 { 42 + (void)irq_service; 43 + (void)src_id; 44 + (void)ext_id; 42 45 switch (src_id) { 43 46 case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: 44 47 return DC_IRQ_SOURCE_VBLANK1; ··· 187 184 REG_STRUCT[base + reg_num].enable_value[0] = \ 188 185 block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ 189 186 REG_STRUCT[base + reg_num].enable_value[1] = \ 190 - ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK, \ 187 + (uint32_t)~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK, \ 191 188 REG_STRUCT[base + reg_num].ack_reg = SRI(reg2, block, reg_num),\ 192 189 REG_STRUCT[base + reg_num].ack_mask = \ 193 190 block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\ ··· 201 198 REG_STRUCT[base].enable_value[0] = \ 202 199 reg1 ## __ ## mask1 ## _MASK,\ 203 200 REG_STRUCT[base].enable_value[1] = \ 204 - ~reg1 ## __ ## mask1 ## _MASK, \ 201 + (uint32_t)~reg1 ## __ ## mask1 ## _MASK, \ 205 202 REG_STRUCT[base].ack_reg = SRI_DMUB(reg2),\ 206 203 REG_STRUCT[base].ack_mask = \ 207 204 reg2 ## __ ## mask2 ## _MASK,\
+5 -2
drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c
··· 18 18 uint32_t src_id, 19 19 uint32_t ext_id) 20 20 { 21 + (void)irq_service; 22 + (void)src_id; 23 + (void)ext_id; 21 24 switch (src_id) { 22 25 case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: 23 26 return DC_IRQ_SOURCE_VBLANK1; ··· 166 163 REG_STRUCT[base + reg_num].enable_value[0] = \ 167 164 block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ 168 165 REG_STRUCT[base + reg_num].enable_value[1] = \ 169 - ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK, \ 166 + (uint32_t)~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK, \ 170 167 REG_STRUCT[base + reg_num].ack_reg = SRI(reg2, block, reg_num),\ 171 168 REG_STRUCT[base + reg_num].ack_mask = \ 172 169 block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\ ··· 180 177 REG_STRUCT[base].enable_value[0] = \ 181 178 reg1 ## __ ## mask1 ## _MASK,\ 182 179 REG_STRUCT[base].enable_value[1] = \ 183 - ~reg1 ## __ ## mask1 ## _MASK, \ 180 + (uint32_t)~reg1 ## __ ## mask1 ## _MASK, \ 184 181 REG_STRUCT[base].ack_reg = SRI_DMUB(reg2),\ 185 182 REG_STRUCT[base].ack_mask = \ 186 183 reg2 ## __ ## mask2 ## _MASK,\
+5 -2
drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c
··· 17 17 uint32_t src_id, 18 18 uint32_t ext_id) 19 19 { 20 + (void)irq_service; 21 + (void)src_id; 22 + (void)ext_id; 20 23 switch (src_id) { 21 24 case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: 22 25 return DC_IRQ_SOURCE_VBLANK1; ··· 165 162 REG_STRUCT[base + reg_num].enable_value[0] = \ 166 163 block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ 167 164 REG_STRUCT[base + reg_num].enable_value[1] = \ 168 - ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK, \ 165 + (uint32_t)~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK, \ 169 166 REG_STRUCT[base + reg_num].ack_reg = SRI(reg2, block, reg_num),\ 170 167 REG_STRUCT[base + reg_num].ack_mask = \ 171 168 block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\ ··· 179 176 REG_STRUCT[base].enable_value[0] = \ 180 177 reg1 ## __ ## mask1 ## _MASK,\ 181 178 REG_STRUCT[base].enable_value[1] = \ 182 - ~reg1 ## __ ## mask1 ## _MASK, \ 179 + (uint32_t)~reg1 ## __ ## mask1 ## _MASK, \ 183 180 REG_STRUCT[base].ack_reg = SRI_DMUB(reg2),\ 184 181 REG_STRUCT[base].ack_mask = \ 185 182 reg2 ## __ ## mask2 ## _MASK,\
+5 -2
drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c
··· 20 20 uint32_t src_id, 21 21 uint32_t ext_id) 22 22 { 23 + (void)irq_service; 24 + (void)src_id; 25 + (void)ext_id; 23 26 switch (src_id) { 24 27 case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: 25 28 return DC_IRQ_SOURCE_VBLANK1; ··· 178 175 block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ 179 176 .enable_value = {\ 180 177 block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ 181 - ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \ 178 + (uint32_t)~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \ 182 179 },\ 183 180 .ack_reg = SRI(reg2, block, reg_num),\ 184 181 .ack_mask = \ ··· 192 189 reg1 ## __ ## mask1 ## _MASK,\ 193 190 .enable_value = {\ 194 191 reg1 ## __ ## mask1 ## _MASK,\ 195 - ~reg1 ## __ ## mask1 ## _MASK \ 192 + (uint32_t)~reg1 ## __ ## mask1 ## _MASK \ 196 193 },\ 197 194 .ack_reg = SRI_DMUB(reg2),\ 198 195 .ack_mask = \
+5 -2
drivers/gpu/drm/amd/display/dc/irq/dcn42/irq_service_dcn42.c
··· 19 19 uint32_t src_id, 20 20 uint32_t ext_id) 21 21 { 22 + (void)irq_service; 23 + (void)src_id; 24 + (void)ext_id; 22 25 switch (src_id) { 23 26 case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: 24 27 return DC_IRQ_SOURCE_VBLANK1; ··· 176 173 block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ 177 174 .enable_value = {\ 178 175 block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\ 179 - ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \ 176 + (uint32_t)~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \ 180 177 },\ 181 178 .ack_reg = SRI(reg2, block, reg_num),\ 182 179 .ack_mask = \ ··· 190 187 reg1 ## __ ## mask1 ## _MASK,\ 191 188 .enable_value = {\ 192 189 reg1 ## __ ## mask1 ## _MASK,\ 193 - ~reg1 ## __ ## mask1 ## _MASK \ 190 + (uint32_t)~reg1 ## __ ## mask1 ## _MASK \ 194 191 },\ 195 192 .ack_reg = SRI_DMUB(reg2),\ 196 193 .ack_mask = \
+5 -1
drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c
··· 117 117 bool rate_2x_mode, 118 118 struct mpc_dwb_flow_control *flow_control) 119 119 { 120 + (void)enable; 121 + (void)rate_2x_mode; 122 + (void)flow_control; 120 123 struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); 121 124 122 125 /* Always disable mpc out rate and flow control. ··· 911 908 bool is_lut_size17x17x17, 912 909 uint32_t rmu_idx) 913 910 { 911 + (void)is_color_channel_12bits; 914 912 uint32_t lut_mode; 915 913 struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); 916 914 ··· 1432 1428 } 1433 1429 1434 1430 //no vacant RMU units or invalid parameters acquire_post_bldn_3dlut 1435 - return -1; 1431 + return (uint32_t)-1; 1436 1432 } 1437 1433 1438 1434 static int mpcc3_release_rmu(struct mpc *mpc, int mpcc_id)
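Two details are worth calling out in this hunk. First, the void casts land ahead of the local declaration of mpc30, which trips -Wdeclaration-after-statement in builds (and checkpatch runs) that still enforce declarations-first; placing the casts after the last declaration sidesteps that. Second, the bare -1 return becomes (uint32_t)-1, spelling out the wrap to the all-ones sentinel. A sketch of that sentinel convention (RMU_NONE is a hypothetical name, not from the source):

/* (uint32_t)-1 makes the wraparound to UINT32_MAX explicit instead of
 * relying on an implicit int-to-unsigned conversion at the return. */
#include <stdint.h>
#include <stdio.h>

#define RMU_NONE ((uint32_t)-1)	/* hypothetical sentinel name */

static uint32_t acquire_rmu(int vacant_units)
{
	if (vacant_units > 0)
		return 0;	/* index of the unit handed out */
	return RMU_NONE;	/* no vacant RMU or invalid parameters */
}

int main(void)
{
	uint32_t rmu = acquire_rmu(0);

	if (rmu == RMU_NONE)
		printf("no RMU available (0x%08x)\n", rmu);
	return 0;
}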
+1
drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
··· 884 884 bool is_lut_size17x17x17, 885 885 uint32_t mpcc_id) 886 886 { 887 + (void)is_color_channel_12bits; 887 888 uint32_t lut_mode; 888 889 struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); 889 890
+1
drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c
··· 250 250 enum dc_color_depth color_dpth, 251 251 enum signal_type signal) 252 252 { 253 + (void)color_sp; 253 254 struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp); 254 255 255 256 REG_UPDATE_2(FMT_DYNAMIC_EXP_CNTL,
+4
drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
··· 164 164 const enum signal_type signal, 165 165 bool use_vbios) 166 166 { 167 + (void)use_vbios; 167 168 struct dc_crtc_timing patched_crtc_timing; 168 169 uint32_t asic_blank_end; 169 170 uint32_t asic_blank_start; ··· 856 855 struct timing_generator *optc, 857 856 uint32_t early_cntl) 858 857 { 858 + (void)optc; 859 + (void)early_cntl; 859 860 /* asic design change, do not need this control 860 861 * empty for share caller logic 861 862 */ ··· 1252 1249 static void optc1_enable_stereo(struct timing_generator *optc, 1253 1250 const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags) 1254 1251 { 1252 + (void)timing; 1255 1253 struct optc *optc1 = DCN10TG_FROM_TG(optc); 1256 1254 1257 1255 if (flags) {
+2
drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
··· 181 181 void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, 182 182 int segment_width, int last_segment_width) 183 183 { 184 + (void)last_segment_width; 184 185 struct optc *optc1 = DCN10TG_FROM_TG(optc); 185 186 uint32_t memory_mask; 186 187 ··· 262 261 uint8_t master_clock_divider, 263 262 uint8_t slave_clock_divider) 264 263 { 264 + (void)slave_clock_divider; 265 265 /* accessing slave OTG registers */ 266 266 struct optc *optc1 = DCN10TG_FROM_TG(optc_slave); 267 267
+1
drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c
··· 218 218 void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, 219 219 int segment_width, int last_segment_width) 220 220 { 221 + (void)last_segment_width; 221 222 struct optc *optc1 = DCN10TG_FROM_TG(optc); 222 223 uint32_t memory_mask = 0; 223 224
+1
drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
··· 43 43 static void optc31_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, 44 44 int segment_width, int last_segment_width) 45 45 { 46 + (void)last_segment_width; 46 47 struct optc *optc1 = DCN10TG_FROM_TG(optc); 47 48 uint32_t memory_mask = 0; 48 49 int mem_count_per_opp = (segment_width + 2559) / 2560;
+1
drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c
··· 50 50 static void optc314_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, 51 51 int segment_width, int last_segment_width) 52 52 { 53 + (void)last_segment_width; 53 54 struct optc *optc1 = DCN10TG_FROM_TG(optc); 54 55 uint32_t memory_mask = 0; 55 56 int h_active = segment_width * opp_cnt;
+1
drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
··· 45 45 static void optc32_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, 46 46 int segment_width, int last_segment_width) 47 47 { 48 + (void)last_segment_width; 48 49 struct optc *optc1 = DCN10TG_FROM_TG(optc); 49 50 uint32_t memory_mask = 0; 50 51 int h_active = segment_width * opp_cnt;
+1
drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
··· 58 58 static void optc35_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt, 59 59 int segment_width, int last_segment_width) 60 60 { 61 + (void)last_segment_width; 61 62 struct optc *optc1 = DCN10TG_FROM_TG(optc); 62 63 uint32_t memory_mask = 0; 63 64 int h_active = segment_width * opp_cnt;
+7 -2
drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
··· 632 632 struct dc_context *ctx, 633 633 const struct encoder_init_data *enc_init_data) 634 634 { 635 + (void)ctx; 635 636 struct dce110_link_encoder *enc110 = 636 637 kzalloc_obj(struct dce110_link_encoder); 637 638 int link_regs_id; ··· 850 849 struct dc_state *context, 851 850 struct dc_stream_state *stream) 852 851 { 852 + (void)dc; 853 853 struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); 854 854 855 855 if (!pipe_ctx) ··· 868 866 struct dc_state *context, 869 867 enum dc_validate_mode validate_mode) 870 868 { 869 + (void)validate_mode; 871 870 int i; 872 871 bool at_least_one_pipe = false; 873 872 struct dc_stream_state *stream = NULL; ··· 929 926 struct dc *dc, 930 927 struct dc_state *context) 931 928 { 929 + (void)dc; 932 930 if (!dce100_validate_surface_sets(context)) 933 931 return DC_FAIL_SURFACE_VALIDATE; 934 932 ··· 965 961 966 962 enum dc_status dce100_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps) 967 963 { 964 + (void)caps; 968 965 969 966 if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) 970 967 return DC_OK; ··· 1044 1039 1045 1040 pool->base.res_cap = &res_cap; 1046 1041 pool->base.funcs = &dce100_res_pool_funcs; 1047 - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; 1042 + pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE; 1048 1043 1049 1044 bp = ctx->dc_bios; 1050 1045 ··· 1116 1111 /************************************************* 1117 1112 * Resource + asic cap harcoding * 1118 1113 *************************************************/ 1119 - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; 1114 + pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE; 1120 1115 pool->base.pipe_count = res_cap.num_timing_generator; 1121 1116 pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator; 1122 1117 dc->caps.max_downscale_ratio = 200;
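NO_UNDERLAY_PIPE gets the same treatment as the inverted masks above. A sketch, assuming the macro expands to a negative "no such pipe" constant such as -1 (the usual convention; the definition is not shown in this diff):

/* Assigning a negative constant to the unsigned underlay_pipe_index field
 * is an implicit sign conversion; the cast documents the intended all-ones
 * encoding and keeps -Wsign-conversion quiet. */
#include <stdio.h>

#define NO_UNDERLAY_PIPE (-1)	/* assumed definition, illustration only */

struct resource_pool_sketch {
	unsigned int underlay_pipe_index;
};

int main(void)
{
	struct resource_pool_sketch pool;

	pool.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE;
	printf("underlay index: 0x%08x\n", pool.underlay_pipe_index);	/* 0xffffffff */
	return 0;
}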
+6
drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
··· 667 667 struct dc_context *ctx, 668 668 const struct encoder_init_data *enc_init_data) 669 669 { 670 + (void)ctx; 670 671 struct dce110_link_encoder *enc110 = 671 672 kzalloc_obj(struct dce110_link_encoder); 672 673 int link_regs_id; ··· 972 971 struct dc_state *context, 973 972 enum dc_validate_mode validate_mode) 974 973 { 974 + (void)validate_mode; 975 975 bool result = false; 976 976 977 977 DC_LOG_BANDWIDTH_CALCS( ··· 1045 1043 static enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state, 1046 1044 struct dc_caps *caps) 1047 1045 { 1046 + (void)caps; 1048 1047 if (((plane_state->dst_rect.width * 2) < plane_state->src_rect.width) || 1049 1048 ((plane_state->dst_rect.height * 2) < plane_state->src_rect.height)) 1050 1049 return DC_FAIL_SURFACE_VALIDATE; ··· 1102 1099 struct dc *dc, 1103 1100 struct dc_state *context) 1104 1101 { 1102 + (void)dc; 1105 1103 if (!dce110_validate_surface_sets(context)) 1106 1104 return DC_FAIL_SURFACE_VALIDATE; 1107 1105 ··· 1134 1130 const struct resource_pool *pool, 1135 1131 const struct pipe_ctx *opp_head_pipe) 1136 1132 { 1133 + (void)cur_ctx; 1137 1134 struct dc_stream_state *stream = opp_head_pipe->stream; 1138 1135 struct dc *dc = stream->ctx->dc; 1139 1136 struct dce_hwseq *hws = dc->hwseq; ··· 1359 1354 struct dce110_resource_pool *pool, 1360 1355 struct hw_asic_id asic_id) 1361 1356 { 1357 + (void)asic_id; 1362 1358 unsigned int i; 1363 1359 struct dc_context *ctx = dc->ctx; 1364 1360 struct dc_bios *bp;
+6 -1
drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
··· 628 628 struct dc_context *ctx, 629 629 const struct encoder_init_data *enc_init_data) 630 630 { 631 + (void)ctx; 631 632 struct dce110_link_encoder *enc110 = 632 633 kzalloc_obj(struct dce110_link_encoder); 633 634 int link_regs_id; ··· 853 852 const struct resource_pool *pool, 854 853 const struct dc_stream_state *const stream) 855 854 { 855 + (void)res_ctx; 856 856 switch (stream->link->link_enc->transmitter) { 857 857 case TRANSMITTER_UNIPHY_A: 858 858 return pool->clock_sources[DCE112_CLK_SRC_PLL0]; ··· 877 875 struct dc_state *context, 878 876 struct dc_stream_state *stream) 879 877 { 878 + (void)dc; 880 879 struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); 881 880 882 881 if (!pipe_ctx) ··· 895 892 struct dc_state *context, 896 893 enum dc_validate_mode validate_mode) 897 894 { 895 + (void)validate_mode; 898 896 bool result = false; 899 897 900 898 DC_LOG_BANDWIDTH_CALCS( ··· 1041 1037 struct dc *dc, 1042 1038 struct dc_state *context) 1043 1039 { 1040 + (void)dc; 1044 1041 if (!dce112_validate_surface_sets(context)) 1045 1042 return DC_FAIL_SURFACE_VALIDATE; 1046 1043 ··· 1245 1240 /************************************************* 1246 1241 * Resource + asic cap harcoding * 1247 1242 *************************************************/ 1248 - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; 1243 + pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE; 1249 1244 pool->base.pipe_count = pool->base.res_cap->num_timing_generator; 1250 1245 pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator; 1251 1246 dc->caps.max_downscale_ratio = 200;
+2 -1
drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
··· 712 712 struct dc_context *ctx, 713 713 const struct encoder_init_data *enc_init_data) 714 714 { 715 + (void)ctx; 715 716 struct dce110_link_encoder *enc110 = 716 717 kzalloc_obj(struct dce110_link_encoder); 717 718 int link_regs_id; ··· 1082 1081 /* TODO: Fill more data from GreenlandAsicCapability.cpp */ 1083 1082 pool->base.pipe_count = res_cap.num_timing_generator; 1084 1083 pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator; 1085 - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; 1084 + pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE; 1086 1085 1087 1086 dc->caps.max_downscale_ratio = 200; 1088 1087 dc->caps.i2c_speed_in_khz = 100;
+4 -3
drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
··· 734 734 struct dc_context *ctx, 735 735 const struct encoder_init_data *enc_init_data) 736 736 { 737 + (void)ctx; 737 738 struct dce110_link_encoder *enc110 = 738 739 kzalloc_obj(struct dce110_link_encoder); 739 740 int link_regs_id; ··· 935 934 /************************************************* 936 935 * Resource + asic cap harcoding * 937 936 *************************************************/ 938 - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; 937 + pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE; 939 938 pool->base.pipe_count = res_cap.num_timing_generator; 940 939 pool->base.timing_generator_count = res_cap.num_timing_generator; 941 940 dc->caps.max_downscale_ratio = 200; ··· 1138 1137 /************************************************* 1139 1138 * Resource + asic cap harcoding * 1140 1139 *************************************************/ 1141 - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; 1140 + pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE; 1142 1141 pool->base.pipe_count = res_cap_81.num_timing_generator; 1143 1142 pool->base.timing_generator_count = res_cap_81.num_timing_generator; 1144 1143 dc->caps.max_downscale_ratio = 200; ··· 1338 1337 /************************************************* 1339 1338 * Resource + asic cap harcoding * 1340 1339 *************************************************/ 1341 - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; 1340 + pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE; 1342 1341 pool->base.pipe_count = res_cap_83.num_timing_generator; 1343 1342 pool->base.timing_generator_count = res_cap_83.num_timing_generator; 1344 1343 dc->caps.max_downscale_ratio = 200;
+4 -1
drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
··· 736 736 struct dc_context *ctx, 737 737 const struct encoder_init_data *enc_init_data) 738 738 { 739 + (void)ctx; 739 740 struct dcn10_link_encoder *enc10 = 740 741 kzalloc_obj(struct dcn10_link_encoder); 741 742 int link_regs_id; ··· 1050 1049 struct dc_state *context, 1051 1050 struct dc_stream_state *stream) 1052 1051 { 1052 + (void)dc; 1053 1053 struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); 1054 1054 1055 1055 if (!pipe_ctx) ··· 1085 1083 const struct resource_pool *pool, 1086 1084 const struct pipe_ctx *opp_head_pipe) 1087 1085 { 1086 + (void)cur_ctx; 1088 1087 struct resource_context *res_ctx = &new_ctx->res_ctx; 1089 1088 struct pipe_ctx *head_pipe = resource_get_otg_master_for_stream(res_ctx, opp_head_pipe->stream); 1090 1089 struct pipe_ctx *idle_pipe = resource_find_free_secondary_pipe_legacy(res_ctx, pool, head_pipe); ··· 1349 1346 /************************************************* 1350 1347 * Resource + asic cap harcoding * 1351 1348 *************************************************/ 1352 - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; 1349 + pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE; 1353 1350 1354 1351 /* max pipe num for ASIC before check pipe fuses */ 1355 1352 pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
+21 -3
drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
··· 916 916 struct dc_context *ctx, 917 917 const struct encoder_init_data *enc_init_data) 918 918 { 919 + (void)ctx; 919 920 struct dcn20_link_encoder *enc20 = 920 921 kzalloc_obj(struct dcn20_link_encoder); 921 922 int link_regs_id; ··· 1311 1310 1312 1311 enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream) 1313 1312 { 1313 + (void)dc; 1314 1314 enum dc_status status = DC_OK; 1315 1315 struct pipe_ctx *pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream); 1316 1316 ··· 1539 1537 struct pipe_ctx *primary_pipe, 1540 1538 struct pipe_ctx *secondary_pipe) 1541 1539 { 1540 + (void)res_ctx; 1542 1541 int pipe_idx = secondary_pipe->pipe_idx; 1543 1542 struct pipe_ctx *sec_bot_pipe = secondary_pipe->bottom_pipe; 1544 1543 ··· 1685 1682 const struct resource_pool *pool, 1686 1683 const struct pipe_ctx *primary_pipe) 1687 1684 { 1685 + (void)pool; 1688 1686 struct pipe_ctx *secondary_pipe = NULL; 1689 1687 1690 1688 if (dc && primary_pipe) { ··· 1818 1814 } 1819 1815 } 1820 1816 1817 + static bool is_dual_plane(enum surface_pixel_format format) 1818 + { 1819 + return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA; 1820 + } 1821 + 1821 1822 int dcn20_validate_apply_pipe_split_flags( 1822 1823 struct dc *dc, 1823 1824 struct dc_state *context, ··· 1907 1898 for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { 1908 1899 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; 1909 1900 int pipe_plane = v->pipe_plane[pipe_idx]; 1910 - bool split4mpc = context->stream_count == 1 && plane_count == 1 1911 - && dc->config.enable_4to1MPC && dc->res_pool->pipe_count >= 4; 1901 + bool split4mpc = false; 1902 + 1903 + if (context->stream_count == 1 && plane_count == 1 1904 + && dc->config.allow_4to1MPC && dc->res_pool->pipe_count >= 4 1905 + && !dc->debug.disable_z9_mpc 1906 + && pipe->plane_state && is_dual_plane(pipe->plane_state->format) 1907 + && pipe->plane_state->src_rect.width <= 1920 1908 + && pipe->plane_state->src_rect.height <= 1080) 1909 + split4mpc = true; 1912 1910 1913 1911 if (!context->res_ctx.pipe_ctx[i].stream) 1914 1912 continue; ··· 2165 2149 const struct resource_pool *pool, 2166 2150 const struct pipe_ctx *opp_head) 2167 2151 { 2152 + (void)cur_ctx; 2168 2153 struct resource_context *res_ctx = &new_ctx->res_ctx; 2169 2154 struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(res_ctx, opp_head->stream); 2170 2155 struct pipe_ctx *sec_dpp_pipe = resource_find_free_secondary_pipe_legacy(res_ctx, pool, otg_master); ··· 2348 2331 2349 2332 static enum dml_project get_dml_project_version(uint32_t hw_internal_rev) 2350 2333 { 2334 + (void)hw_internal_rev; 2351 2335 return DML_PROJECT_NAVI10v2; 2352 2336 } 2353 2337 ··· 2435 2417 /************************************************* 2436 2418 * Resource + asic cap harcoding * 2437 2419 *************************************************/ 2438 - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; 2420 + pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE; 2439 2421 2440 2422 dc->caps.max_downscale_ratio = 200; 2441 2423 dc->caps.i2c_speed_in_khz = 100;
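The interesting functional change here is that the 4-to-1 MPC split decision is now gated on a static dc->config.allow_4to1MPC opt-in combined with the same dual-plane/1080p checks the DCN3.1 code used to perform while writing enable_4to1MPC. A self-contained sketch of the combined predicate (names are simplified stand-ins for the real dc/pipe structures):

#include <stdbool.h>
#include <stdio.h>

enum pixel_format { FMT_ARGB8888, FMT_RGBE_ALPHA, FMT_VIDEO_NV12 };

/* video formats carry separate luma/chroma planes; RGBE_ALPHA is the one
 * RGB format that also needs a second plane */
static bool is_dual_plane(enum pixel_format f)
{
	return f >= FMT_VIDEO_NV12 || f == FMT_RGBE_ALPHA;
}

static bool want_split4mpc(int stream_count, int plane_count,
			   bool allow_4to1mpc, bool disable_z9_mpc,
			   int pipe_count, enum pixel_format fmt,
			   int src_w, int src_h)
{
	return stream_count == 1 && plane_count == 1 &&
	       allow_4to1mpc && pipe_count >= 4 && !disable_z9_mpc &&
	       is_dual_plane(fmt) && src_w <= 1920 && src_h <= 1080;
}

int main(void)
{
	printf("%d\n", want_split4mpc(1, 1, true, false, 4,
				      FMT_VIDEO_NV12, 1920, 1080));	/* 1 */
	return 0;
}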
+20 -13
drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
··· 772 772 int *pipe_cnt_out, 773 773 int *pipe_split_from, 774 774 int *vlevel_out, 775 - enum dc_validate_mode validate_mode) 775 + enum dc_validate_mode validate_mode, 776 + bool allow_self_refresh_only) 776 777 { 777 778 bool out = false; 778 779 int split[MAX_PIPES] = { 0 }; ··· 804 803 vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); 805 804 806 805 if (vlevel > context->bw_ctx.dml.soc.num_states) { 807 - /* 808 - * If mode is unsupported or there's still no p-state support then 809 - * fall back to favoring voltage. 810 - * 811 - * We don't actually support prefetch mode 2, so require that we 812 - * at least support prefetch mode 1. 813 - */ 814 - context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank = 815 - dm_allow_self_refresh; 816 - vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); 817 - if (vlevel > context->bw_ctx.dml.soc.num_states) 806 + 807 + if (allow_self_refresh_only) { 808 + /* 809 + * If mode is unsupported or there's still no p-state support then 810 + * fall back to favoring voltage. 811 + * 812 + * We don't actually support prefetch mode 2, so require that we 813 + * at least support prefetch mode 1. 814 + */ 815 + context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank = 816 + dm_allow_self_refresh; 817 + vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); 818 + if (vlevel > context->bw_ctx.dml.soc.num_states) 819 + goto validate_fail; 820 + } else { 818 821 goto validate_fail; 822 + } 819 823 } 820 824 821 825 vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge); ··· 1299 1293 struct dc_context *ctx, 1300 1294 const struct encoder_init_data *enc_init_data) 1301 1295 { 1296 + (void)ctx; 1302 1297 struct dcn21_link_encoder *enc21 = 1303 1298 kzalloc_obj(struct dcn21_link_encoder); 1304 1299 int link_regs_id; ··· 1409 1402 /************************************************* 1410 1403 * Resource + asic cap harcoding * 1411 1404 *************************************************/ 1412 - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; 1405 + pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE; 1413 1406 1414 1407 /* max pipe num for ASIC before check pipe fuses */ 1415 1408 pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
+2 -1
drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h
··· 51 51 int *pipe_cnt_out, 52 52 int *pipe_split_from, 53 53 int *vlevel_out, 54 - enum dc_validate_mode validate_mode); 54 + enum dc_validate_mode validate_mode, 55 + bool allow_self_refresh_only); 55 56 56 57 #endif /* _DCN21_RESOURCE_H_ */
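Together these two hunks thread a new allow_self_refresh_only flag through the dcn21 fast-validate path. A sketch of the behavior it selects (stand-in names; the real retry adjusts allow_dram_self_refresh_or_dram_clock_change_in_vblank and reruns dml_get_voltage_level):

#include <stdbool.h>
#include <stdio.h>

static int get_voltage_level(bool allow_self_refresh_only)
{
	int num_states = 4;
	int vlevel = 5;	/* pretend the strict pass exceeded every state */

	if (vlevel > num_states) {
		if (!allow_self_refresh_only)
			return -1;	/* validate_fail, no fallback */
		/* fall back to favoring voltage: retry while allowing DRAM
		 * self-refresh in vblank (prefetch mode 1) */
		vlevel = num_states;
		if (vlevel > num_states)
			return -1;
	}
	return vlevel;
}

int main(void)
{
	printf("strict=%d fallback=%d\n",
	       get_voltage_level(false), get_voltage_level(true));
	return 0;
}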
+4 -1
drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
··· 924 924 struct dc_context *ctx, 925 925 const struct encoder_init_data *enc_init_data) 926 926 { 927 + (void)ctx; 927 928 struct dcn20_link_encoder *enc20 = 928 929 kzalloc_obj(struct dcn20_link_encoder); 929 930 ··· 1642 1641 if (!pipes) 1643 1642 return false; 1644 1643 1644 + dcn20_merge_pipes_for_validate(dc, context); 1645 + 1645 1646 context->bw_ctx.dml.vba.maxMpcComb = 0; 1646 1647 context->bw_ctx.dml.vba.VoltageLevel = 0; 1647 1648 context->bw_ctx.dml.vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive; ··· 2298 2295 /************************************************* 2299 2296 * Resource + asic cap harcoding * 2300 2297 *************************************************/ 2301 - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; 2298 + pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE; 2302 2299 pool->base.pipe_count = pool->base.res_cap->num_timing_generator; 2303 2300 pool->base.mpcc_count = pool->base.res_cap->num_timing_generator; 2304 2301 dc->caps.max_downscale_ratio = 600;
+2 -1
drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
··· 880 880 struct dc_context *ctx, 881 881 const struct encoder_init_data *enc_init_data) 882 882 { 883 + (void)ctx; 883 884 struct dcn20_link_encoder *enc20 = 884 885 kzalloc_obj(struct dcn20_link_encoder); 885 886 ··· 1429 1428 /************************************************* 1430 1429 * Resource + asic cap harcoding * 1431 1430 *************************************************/ 1432 - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; 1431 + pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE; 1433 1432 pool->base.pipe_count = pool->base.res_cap->num_timing_generator; 1434 1433 pool->base.mpcc_count = pool->base.res_cap->num_timing_generator; 1435 1434 dc->caps.max_downscale_ratio = 600;
+2 -1
drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
··· 894 894 struct dc_context *ctx, 895 895 const struct encoder_init_data *enc_init_data) 896 896 { 897 + (void)ctx; 897 898 struct dcn20_link_encoder *enc20 = kzalloc_obj(struct dcn20_link_encoder); 898 899 899 900 if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) ··· 1219 1218 /************************************************* 1220 1219 * Resource + asic cap harcoding * 1221 1220 *************************************************/ 1222 - pool->underlay_pipe_index = NO_UNDERLAY_PIPE; 1221 + pool->underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE; 1223 1222 pool->pipe_count = pool->res_cap->num_timing_generator; 1224 1223 pool->mpcc_count = pool->res_cap->num_timing_generator; 1225 1224 dc->caps.max_downscale_ratio = 600;
+2 -1
drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
··· 839 839 struct dc_context *ctx, 840 840 const struct encoder_init_data *enc_init_data) 841 841 { 842 + (void)ctx; 842 843 struct dcn20_link_encoder *enc20 = kzalloc_obj(struct dcn20_link_encoder); 843 844 844 845 if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) ··· 1160 1159 /************************************************* 1161 1160 * Resource + asic cap harcoding * 1162 1161 *************************************************/ 1163 - pool->underlay_pipe_index = NO_UNDERLAY_PIPE; 1162 + pool->underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE; 1164 1163 pool->pipe_count = pool->res_cap->num_timing_generator; 1165 1164 pool->mpcc_count = pool->res_cap->num_timing_generator; 1166 1165 dc->caps.max_downscale_ratio = 600;
+8 -8
drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
··· 1090 1090 struct dc_context *ctx, 1091 1091 const struct encoder_init_data *enc_init_data) 1092 1092 { 1093 + (void)ctx; 1093 1094 struct dcn20_link_encoder *enc20 = 1094 1095 kzalloc_obj(struct dcn20_link_encoder); 1095 1096 ··· 1700 1699 pipe_cnt++; 1701 1700 } 1702 1701 context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_1_DEFAULT_DET_SIZE; 1703 - dc->config.enable_4to1MPC = false; 1702 + 1704 1703 if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) { 1705 - if (is_dual_plane(pipe->plane_state->format) 1706 - && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) { 1707 - dc->config.enable_4to1MPC = true; 1708 - } else if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) { 1704 + if (!is_dual_plane(pipe->plane_state->format) && pipe->plane_state->src_rect.width <= 5120) { 1709 1705 /* Limit to 5k max to avoid forced pipe split when there is not enough detile for swath */ 1710 1706 context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192; 1711 1707 pipes[0].pipe.src.unbounded_req_mode = true; ··· 1895 1897 /************************************************* 1896 1898 * Resource + asic cap harcoding * 1897 1899 *************************************************/ 1898 - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; 1900 + pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE; 1899 1901 pool->base.pipe_count = pool->base.res_cap->num_timing_generator; 1900 1902 pool->base.mpcc_count = pool->base.res_cap->num_timing_generator; 1901 1903 dc->caps.max_downscale_ratio = 600; ··· 1919 1921 dc->caps.dmcub_support = true; 1920 1922 dc->caps.is_apu = true; 1921 1923 dc->caps.zstate_support = true; 1924 + 1925 + /* Enable 4to1MPC by default */ 1926 + dc->config.allow_4to1MPC = true; 1922 1927 1923 1928 /* Color pipeline capabilities */ 1924 1929 dc->caps.color.dpp.dcn_arch = 1; ··· 1963 1962 /* Use pipe context based otg sync logic */ 1964 1963 dc->config.use_pipe_ctx_sync_logic = true; 1965 1964 dc->config.disable_hbr_audio_dp2 = true; 1966 - 1967 - dc->config.no_native422_support = true; 1968 1965 1969 1966 /* read VBIOS LTTPR caps */ 1970 1967 { ··· 2244 2245 struct pipe_ctx *pipes, 2245 2246 struct audio_output *audio_output) 2246 2247 { 2248 + (void)link_setting; 2247 2249 struct dc_state *state = link->dc->current_state; 2248 2250 int i; 2249 2251
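Alongside the default-enable added at pool construction, the per-validation write to enable_4to1MPC disappears: allow_4to1MPC is a static per-ASIC opt-in that the split logic only reads. A sketch of the resulting ownership (struct and helper are stand-ins):

#include <stdbool.h>
#include <stdio.h>

struct dc_config {
	bool allow_4to1MPC;	/* set once per ASIC, read-only afterwards */
};

static void pool_construct(struct dc_config *cfg)
{
	cfg->allow_4to1MPC = true;	/* per-ASIC default */
}

static bool may_split_4to1(const struct dc_config *cfg, bool fits_1080p_video)
{
	/* consulted during validation, never written there (unlike the old
	 * enable_4to1MPC, which each validation pass recomputed) */
	return cfg->allow_4to1MPC && fits_1080p_video;
}

int main(void)
{
	struct dc_config cfg;

	pool_construct(&cfg);
	printf("%d\n", may_split_4to1(&cfg, true));	/* 1 */
	return 0;
}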
+5 -1
drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
··· 1148 1148 struct dc_context *ctx, 1149 1149 const struct encoder_init_data *enc_init_data) 1150 1150 { 1151 + (void)ctx; 1151 1152 struct dcn20_link_encoder *enc20 = 1152 1153 kzalloc_obj(struct dcn20_link_encoder); 1153 1154 ··· 1828 1827 /************************************************* 1829 1828 * Resource + asic cap harcoding * 1830 1829 *************************************************/ 1831 - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; 1830 + pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE; 1832 1831 pool->base.pipe_count = pool->base.res_cap->num_timing_generator; 1833 1832 pool->base.mpcc_count = pool->base.res_cap->num_timing_generator; 1833 + 1834 + /* Enable 4to1MPC by default */ 1835 + dc->config.allow_4to1MPC = true; 1834 1836 dc->caps.max_downscale_ratio = 400; 1835 1837 dc->caps.i2c_speed_in_khz = 100; 1836 1838 dc->caps.i2c_speed_in_khz_hdcp = 100;
+5 -5
drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
··· 1089 1089 struct dc_context *ctx, 1090 1090 const struct encoder_init_data *enc_init_data) 1091 1091 { 1092 + (void)ctx; 1092 1093 struct dcn20_link_encoder *enc20 = 1093 1094 kzalloc_obj(struct dcn20_link_encoder); 1094 1095 ··· 1786 1785 if (context->bw_ctx.dml.ip.det_buffer_size_kbytes > DCN3_15_MAX_DET_SIZE) 1787 1786 context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_15_MAX_DET_SIZE; 1788 1787 1789 - dc->config.enable_4to1MPC = false; 1790 1788 if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) { 1791 1789 if (is_dual_plane(pipe->plane_state->format) 1792 1790 && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) { 1793 - dc->config.enable_4to1MPC = true; 1794 1791 context->bw_ctx.dml.ip.det_buffer_size_kbytes = 1795 1792 (max_usable_det / DCN3_15_CRB_SEGMENT_SIZE_KB / 4) * DCN3_15_CRB_SEGMENT_SIZE_KB; 1796 1793 } else if (!is_dual_plane(pipe->plane_state->format) ··· 1867 1868 /************************************************* 1868 1869 * Resource + asic cap harcoding * 1869 1870 *************************************************/ 1870 - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; 1871 + pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE; 1871 1872 pool->base.pipe_count = pool->base.res_cap->num_timing_generator; 1873 + 1874 + /* Enable 4to1MPC by default */ 1875 + dc->config.allow_4to1MPC = true; 1872 1876 pool->base.mpcc_count = pool->base.res_cap->num_timing_generator; 1873 1877 dc->caps.max_downscale_ratio = 600; 1874 1878 dc->caps.i2c_speed_in_khz = 100; ··· 1926 1924 dc->caps.color.mpc.ogam_rom_caps.pq = 0; 1927 1925 dc->caps.color.mpc.ogam_rom_caps.hlg = 0; 1928 1926 dc->caps.color.mpc.ocsc = 1; 1929 - 1930 - dc->config.no_native422_support = true; 1931 1927 1932 1928 /* read VBIOS LTTPR caps */ 1933 1929 {
+6 -3
drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
··· 1082 1082 struct dc_context *ctx, 1083 1083 const struct encoder_init_data *enc_init_data) 1084 1084 { 1085 + (void)ctx; 1085 1086 struct dcn20_link_encoder *enc20 = 1086 1087 kzalloc_obj(struct dcn20_link_encoder); 1087 1088 ··· 1670 1669 if (context->bw_ctx.dml.ip.det_buffer_size_kbytes > DCN3_16_MAX_DET_SIZE) 1671 1670 context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_16_MAX_DET_SIZE; 1672 1671 ASSERT(context->bw_ctx.dml.ip.det_buffer_size_kbytes >= DCN3_16_DEFAULT_DET_SIZE); 1673 - dc->config.enable_4to1MPC = false; 1674 1672 if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) { 1675 1673 if (is_dual_plane(pipe->plane_state->format) 1676 1674 && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) { 1677 - dc->config.enable_4to1MPC = true; 1678 1675 context->bw_ctx.dml.ip.det_buffer_size_kbytes = 1679 1676 (max_usable_det / DCN3_16_CRB_SEGMENT_SIZE_KB / 4) * DCN3_16_CRB_SEGMENT_SIZE_KB; 1680 1677 } else if (!is_dual_plane(pipe->plane_state->format)) { ··· 1742 1743 /************************************************* 1743 1744 * Resource + asic cap harcoding * 1744 1745 *************************************************/ 1745 - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; 1746 + pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE; 1746 1747 pool->base.pipe_count = pool->base.res_cap->num_timing_generator; 1747 1748 pool->base.mpcc_count = pool->base.res_cap->num_timing_generator; 1749 + 1750 + /* Enable 4to1MPC by default */ 1751 + dc->config.allow_4to1MPC = true; 1752 + 1748 1753 dc->caps.max_downscale_ratio = 600; 1749 1754 dc->caps.i2c_speed_in_khz = 100; 1750 1755 dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.5 w/a applied by default*/
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
··· 2191 2191 /************************************************* 2192 2192 * Resource + asic cap harcoding * 2193 2193 *************************************************/ 2194 - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; 2194 + pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE; 2195 2195 pool->base.timing_generator_count = num_pipes; 2196 2196 pool->base.pipe_count = num_pipes; 2197 2197 pool->base.mpcc_count = num_pipes;
+1
drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource_helpers.c
··· 41 41 struct pipe_ctx *pipe_ctx, 42 42 bool ignore_cursor_buf) 43 43 { 44 + (void)dc; 44 45 struct hubp *hubp = pipe_ctx->plane_res.hubp; 45 46 uint32_t cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height; 46 47 uint32_t cursor_mall_size_bytes = 0;
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
··· 1695 1695 /************************************************* 1696 1696 * Resource + asic cap harcoding * 1697 1697 *************************************************/ 1698 - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; 1698 + pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE; 1699 1699 pool->base.timing_generator_count = num_pipes; 1700 1700 pool->base.pipe_count = num_pipes; 1701 1701 pool->base.mpcc_count = num_pipes;
+13 -2
drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
··· 1738 1738 { 1739 1739 bool out = false; 1740 1740 1741 + DC_FP_START(); 1741 1742 out = dml2_validate(dc, context, 1742 1743 context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, 1743 1744 validate_mode); 1745 + DC_FP_END(); 1744 1746 1745 1747 if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) 1746 1748 return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; ··· 1776 1774 return ret; 1777 1775 } 1778 1776 1777 + void dcn35_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) 1778 + { 1779 + DC_FP_START(); 1780 + dcn35_update_bw_bounding_box_fpu(dc, bw_params); 1781 + DC_FP_END(); 1782 + } 1779 1783 static struct resource_funcs dcn35_res_pool_funcs = { 1780 1784 .destroy = dcn35_destroy_resource_pool, 1781 1785 .link_enc_create = dcn35_link_encoder_create, ··· 1803 1795 .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, 1804 1796 .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut, 1805 1797 .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut, 1806 - .update_bw_bounding_box = dcn35_update_bw_bounding_box_fpu, 1798 + .update_bw_bounding_box = dcn35_update_bw_bounding_box, 1807 1799 .patch_unknown_plane_state = dcn35_patch_unknown_plane_state, 1808 1800 .get_panel_config_defaults = dcn35_get_panel_config_defaults, 1809 1801 .get_preferred_eng_id_dpia = dcn35_get_preferred_eng_id_dpia, ··· 1835 1827 clk_src_regs_init(3, D), 1836 1828 clk_src_regs_init(4, E); 1837 1829 1830 + /* Enable 4to1MPC by default */ 1831 + dc->config.allow_4to1MPC = true; 1832 + 1838 1833 #undef REG_STRUCT 1839 1834 #define REG_STRUCT abm_regs 1840 1835 abm_regs_init(0), ··· 1858 1847 /************************************************* 1859 1848 * Resource + asic cap harcoding * 1860 1849 *************************************************/ 1861 - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; 1850 + pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE; 1862 1851 pool->base.pipe_count = pool->base.res_cap->num_timing_generator; 1863 1852 pool->base.mpcc_count = pool->base.res_cap->num_timing_generator; 1864 1853 dc->caps.max_downscale_ratio = 600;
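dcn35 now brackets its DML2 validation in an FP section and hides the *_fpu bounding-box helper behind a wrapper. A userspace model of the bracketing pattern; the real DC_FP_START()/DC_FP_END() resolve to the kernel's FPU begin/end helpers (kernel_fpu_begin()/kernel_fpu_end() on x86), modeled here as counters so the sketch runs standalone:

#include <stdio.h>

static int fp_depth;
#define DC_FP_START()	(fp_depth++)	/* stand-in for the kernel macro */
#define DC_FP_END()	(fp_depth--)	/* stand-in for the kernel macro */

/* only legal between DC_FP_START() and DC_FP_END() in kernel context,
 * because kernel code must not touch FPU state outside such a section */
static double compute_bw_fpu(double dispclk_mhz)
{
	return dispclk_mhz * 1.25;
}

int main(void)
{
	double bw;

	DC_FP_START();
	bw = compute_bw_fpu(400.0);
	DC_FP_END();

	printf("bw=%.1f depth=%d\n", bw, fp_depth);	/* bw=500.0 depth=0 */
	return 0;
}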
+1
drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h
··· 312 312 #define DPP_REG_LIST_DCN35_RI(id)\ 313 313 DPP_REG_LIST_DCN30_COMMON_RI(id) 314 314 315 + void dcn35_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); 315 316 #endif /* _DCN35_RESOURCE_H_ */
+13 -2
drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
··· 1718 1718 { 1719 1719 bool out = false; 1720 1720 1721 + DC_FP_START(); 1721 1722 out = dml2_validate(dc, context, 1722 1723 context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, 1723 1724 validate_mode); 1725 + DC_FP_END(); 1724 1726 1725 1727 if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) 1726 1728 return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; ··· 1749 1747 1750 1748 } 1751 1749 1750 + static void dcn351_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) 1751 + { 1752 + DC_FP_START(); 1753 + dcn351_update_bw_bounding_box_fpu(dc, bw_params); 1754 + DC_FP_END(); 1755 + } 1752 1756 static struct resource_funcs dcn351_res_pool_funcs = { 1753 1757 .destroy = dcn351_destroy_resource_pool, 1754 1758 .link_enc_create = dcn35_link_encoder_create, ··· 1776 1768 .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, 1777 1769 .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut, 1778 1770 .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut, 1779 - .update_bw_bounding_box = dcn351_update_bw_bounding_box_fpu, 1771 + .update_bw_bounding_box = dcn351_update_bw_bounding_box, 1780 1772 .patch_unknown_plane_state = dcn35_patch_unknown_plane_state, 1781 1773 .get_panel_config_defaults = dcn35_get_panel_config_defaults, 1782 1774 .get_preferred_eng_id_dpia = dcn351_get_preferred_eng_id_dpia, ··· 1808 1800 clk_src_regs_init(3, D), 1809 1801 clk_src_regs_init(4, E); 1810 1802 1803 + /* Enable 4to1MPC by default */ 1804 + dc->config.allow_4to1MPC = true; 1805 + 1811 1806 #undef REG_STRUCT 1812 1807 #define REG_STRUCT abm_regs 1813 1808 abm_regs_init(0), ··· 1831 1820 /************************************************* 1832 1821 * Resource + asic cap harcoding * 1833 1822 *************************************************/ 1834 - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; 1823 + pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE; 1835 1824 pool->base.pipe_count = pool->base.res_cap->num_timing_generator; 1836 1825 pool->base.mpcc_count = pool->base.res_cap->num_timing_generator; 1837 1826 dc->caps.max_downscale_ratio = 600;
+4 -2
drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
··· 1725 1725 { 1726 1726 bool out = false; 1727 1727 1728 + DC_FP_START(); 1728 1729 out = dml2_validate(dc, context, 1729 1730 context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, 1730 1731 validate_mode); 1732 + DC_FP_END(); 1731 1733 1732 1734 if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) 1733 1735 return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; ··· 1777 1775 .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link, 1778 1776 .acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut, 1779 1777 .release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut, 1780 - .update_bw_bounding_box = dcn35_update_bw_bounding_box_fpu, 1778 + .update_bw_bounding_box = dcn35_update_bw_bounding_box, 1781 1779 .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, 1782 1780 .get_panel_config_defaults = dcn35_get_panel_config_defaults, 1783 1781 .get_preferred_eng_id_dpia = dcn36_get_preferred_eng_id_dpia, ··· 1828 1826 /************************************************* 1829 1827 * Resource + asic cap harcoding * 1830 1828 *************************************************/ 1831 - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; 1829 + pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE; 1832 1830 pool->base.pipe_count = pool->base.res_cap->num_timing_generator; 1833 1831 pool->base.mpcc_count = pool->base.res_cap->num_timing_generator; 1834 1832 dc->caps.max_downscale_ratio = 600;
+24 -10
drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
··· 1643 1643 .get_subvp_en = dcn32_subvp_in_use, 1644 1644 }; 1645 1645 1646 - static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) 1646 + static void dcn401_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params) 1647 1647 { 1648 + dc_assert_fp_enabled(); 1649 + 1648 1650 /* re-calculate the available MALL size if required */ 1649 1651 if (bw_params->num_channels > 0) { 1650 1652 dc->caps.max_cab_allocation_bytes = dcn401_calc_num_avail_chans_for_mall( ··· 1655 1653 dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes; 1656 1654 } 1657 1655 1658 - DC_FP_START(); 1659 - 1660 1656 if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2) 1661 1657 dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2); 1662 1658 1663 1659 if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source) 1664 1660 dml2_reinit(dc, &dc->dml2_dc_power_options, &dc->current_state->bw_ctx.dml2_dc_power_source); 1665 - 1666 - DC_FP_END(); 1667 1661 } 1668 1662 1663 + static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) 1664 + { 1665 + DC_FP_START(); 1666 + dcn401_update_bw_bounding_box_fpu(dc, bw_params); 1667 + DC_FP_END(); 1668 + } 1669 1669 enum dc_status dcn401_patch_unknown_plane_state(struct dc_plane_state *plane_state) 1670 1670 { 1671 1671 plane_state->tiling_info.gfxversion = DcGfxAddr3; ··· 1692 1688 } 1693 1689 } 1694 1690 1695 - if (dc->debug.using_dml2) 1691 + if (dc->debug.using_dml2) { 1692 + DC_FP_START(); 1696 1693 status = dml2_validate(dc, context, 1697 1694 context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, 1698 1695 validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; 1696 + DC_FP_END(); 1697 + } 1699 1698 1700 1699 if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_OK && dc_state_is_subvp_in_use(context)) { 1701 1700 /* check new stream configuration still supports cursor if subvp used */ ··· 1717 1710 1718 1711 if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_FAIL_HW_CURSOR_SUPPORT) { 1719 1712 /* attempt to validate again with subvp disabled due to cursor */ 1720 - if (dc->debug.using_dml2) 1713 + if (dc->debug.using_dml2) { 1714 + DC_FP_START(); 1721 1715 status = dml2_validate(dc, context, 1722 1716 context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, 1723 1717 validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; 1718 + DC_FP_END(); 1719 + } 1724 1720 } 1725 1721 1726 1722 return status; ··· 1732 1722 void dcn401_prepare_mcache_programming(struct dc *dc, 1733 1723 struct dc_state *context) 1734 1724 { 1735 - if (dc->debug.using_dml21) 1725 + if (dc->debug.using_dml21) { 1726 + DC_FP_START(); 1736 1727 dml2_prepare_mcache_programming(dc, context, 1737 - context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2); 1728 + context->power_source == DC_POWER_SOURCE_DC ? 
1729 + context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2); 1730 + DC_FP_END(); 1731 + } 1738 1732 } 1739 1733 1740 1734 static void dcn401_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx) ··· 1929 1915 /************************************************* 1930 1916 * Resource + asic cap harcoding * 1931 1917 *************************************************/ 1932 - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; 1918 + pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE; 1933 1919 pool->base.timing_generator_count = num_pipes; 1934 1920 pool->base.pipe_count = num_pipes; 1935 1921 pool->base.mpcc_count = num_pipes;
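dcn401 applies the complementary refactor: the FPU-touching body moves into a *_fpu() helper that asserts it is running inside an FP section, and a thin wrapper owns the bracketing. A sketch of the split (the macros and assert are modeled for a userspace build):

#include <assert.h>
#include <stdio.h>

static int fp_enabled;
#define DC_FP_START()		(fp_enabled = 1)	/* modeled */
#define DC_FP_END()		(fp_enabled = 0)	/* modeled */
#define dc_assert_fp_enabled()	assert(fp_enabled)

static void update_bounding_box_fpu(double *box, double scale)
{
	dc_assert_fp_enabled();	/* catches callers that skip the wrapper */
	*box *= scale;
}

static void update_bounding_box(double *box, double scale)
{
	DC_FP_START();
	update_bounding_box_fpu(box, scale);
	DC_FP_END();
}

int main(void)
{
	double box = 100.0;

	update_bounding_box(&box, 1.5);
	printf("%.1f\n", box);	/* 150.0 */
	return 0;
}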
+26 -8
drivers/gpu/drm/amd/display/dc/resource/dcn42/dcn42_resource.c
··· 761 761 .ignore_pg = true, 762 762 .disable_stutter_for_wm_program = true, 763 763 .min_deep_sleep_dcfclk_khz = 8000, 764 + .replay_skip_crtc_disabled = true, 765 + .psr_skip_crtc_disable = true, 764 766 }; 765 767 766 768 static const struct dc_check_config config_defaults = { ··· 1696 1694 static struct dc_cap_funcs cap_funcs = { 1697 1695 .get_dcc_compression_cap = dcn20_get_dcc_compression_cap}; 1698 1696 1697 + static void dcn42_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params) 1698 + { 1699 + (void)bw_params; 1700 + dc_assert_fp_enabled(); 1701 + 1702 + if (dc->current_state && dc->current_state->bw_ctx.dml2) 1703 + dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2); 1704 + } 1705 + 1699 1706 static void dcn42_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) 1700 1707 { 1701 1708 DC_FP_START(); 1702 - if (dc->current_state && dc->current_state->bw_ctx.dml2) 1703 - dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2); 1709 + dcn42_update_bw_bounding_box_fpu(dc, bw_params); 1704 1710 DC_FP_END(); 1705 1711 } 1706 - 1707 1712 enum dc_status dcn42_validate_bandwidth(struct dc *dc, 1708 1713 struct dc_state *context, 1709 1714 enum dc_validate_mode validate_mode) 1710 1715 { 1711 1716 bool out = false; 1712 1717 1718 + DC_FP_START(); 1719 + 1713 1720 out = dml2_validate(dc, context, context->bw_ctx.dml2, 1714 1721 validate_mode); 1715 - DC_FP_START(); 1722 + 1716 1723 if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING) { 1717 1724 /*not required for mode enumeration*/ 1718 1725 dcn42_decide_zstate_support(dc, context); 1719 1726 } 1727 + 1720 1728 DC_FP_END(); 1729 + 1721 1730 return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; 1722 1731 } 1723 1732 void dcn42_prepare_mcache_programming(struct dc *dc, 1724 1733 struct dc_state *context) 1725 1734 { 1726 - if (dc->debug.using_dml21) 1735 + if (dc->debug.using_dml21) { 1736 + DC_FP_START(); 1727 1737 dml2_prepare_mcache_programming(dc, context, 1728 1738 context->power_source == DC_POWER_SOURCE_DC ? 1729 - context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2); 1739 + context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2); 1740 + DC_FP_END(); 1741 + } 1730 1742 } 1731 1743 /* Create a minimal link encoder object not associated with a particular 1732 1744 * physical connector. 
··· 1775 1759 struct dc_state *state, 1776 1760 const struct dc_stream_state *stream) 1777 1761 { 1762 + (void)state; 1763 + (void)stream; 1778 1764 return dc->caps.max_cursor_size; 1779 1765 } 1780 1766 static struct resource_funcs dcn42_res_pool_funcs = { ··· 1801 1783 .acquire_post_bldn_3dlut = dcn32_acquire_post_bldn_3dlut, 1802 1784 .release_post_bldn_3dlut = dcn32_release_post_bldn_3dlut, 1803 1785 .update_bw_bounding_box = dcn42_update_bw_bounding_box, 1804 - .patch_unknown_plane_state = dcn401_patch_unknown_plane_state, 1786 + .patch_unknown_plane_state = dcn35_patch_unknown_plane_state, 1805 1787 .get_panel_config_defaults = dcn42_get_panel_config_defaults, 1806 1788 .get_preferred_eng_id_dpia = dcn42_get_preferred_eng_id_dpia, 1807 1789 .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, ··· 1882 1864 /************************************************* 1883 1865 * Resource + asic cap harcoding * 1884 1866 *************************************************/ 1885 - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; 1867 + pool->base.underlay_pipe_index = (unsigned int)NO_UNDERLAY_PIPE; 1886 1868 pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator; 1887 1869 pool->base.pipe_count = num_pipes; 1888 1870 pool->base.mpcc_count = num_pipes;
+1
drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c
··· 159 159 160 160 static void apply_soc_bb_updates(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config) 161 161 { 162 + (void)config; 162 163 /* Individual modification can be overwritten even if it was obtained by a previous function. 163 164 * Modifications are acquired in order of priority (lowest to highest). 164 165 */
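The (void)config here, like the (void)state/(void)stream casts in dcn42_resource.c above, is the idiom this series uses to silence -Wunused-parameter for callbacks that must keep a fixed prototype; marking the parameter __maybe_unused is the kernel's other spelling of the same intent. Mirroring the cursor-size callback from the dcn42 hunk (function name illustrative):

/* Unused-parameter suppression for a fixed-prototype callback; the
 * casts are no-ops at runtime and only mark the arguments as
 * intentionally unused.
 */
static unsigned int example_get_max_cursor_size(const struct dc *dc,
						struct dc_state *state,
						const struct dc_stream_state *stream)
{
	(void)state;	/* result does not depend on the state */
	(void)stream;	/* nor on the stream */
	return dc->caps.max_cursor_size;
}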
+1
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
··· 6272 6272 uint16_t amd_vsdb_version; /**< AMD VSDB version */ 6273 6273 uint16_t min_frame_rate; /**< Minimum frame rate */ 6274 6274 uint16_t max_frame_rate; /**< Maximum frame rate */ 6275 + uint8_t freesync_mccs_vcp_code; /**< Freesync MCCS VCP code */ 6275 6276 }; 6276 6277 6277 6278 /**
+14 -6
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
··· 194 194 const struct dmub_window *cw6, 195 195 const struct dmub_window *region6) 196 196 { 197 + (void)region6; 197 198 union dmub_addr offset; 198 199 uint64_t fb_base, fb_offset; 199 200 ··· 397 396 398 397 void dmub_dcn20_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmub_srv_hw_params *params) 399 398 { 399 + (void)params; 400 400 union dmub_fw_boot_options boot_options = {0}; 401 401 402 402 REG_WRITE(DMCUB_SCRATCH14, boot_options.all); ··· 462 460 dmub->debug.inbox0_size = REG_READ(DMCUB_INBOX0_SIZE); 463 461 464 462 REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled); 465 - dmub->debug.is_dmcub_enabled = is_dmub_enabled; 463 + ASSERT(is_dmub_enabled <= 0xFF); 464 + dmub->debug.is_dmcub_enabled = (uint8_t)is_dmub_enabled; 466 465 467 466 REG_GET(DMCUB_CNTL, DMCUB_SOFT_RESET, &is_soft_reset); 468 - dmub->debug.is_dmcub_soft_reset = is_soft_reset; 467 + ASSERT(is_soft_reset <= 0xFF); 468 + dmub->debug.is_dmcub_soft_reset = (uint8_t)is_soft_reset; 469 469 470 470 REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset); 471 - dmub->debug.is_dmcub_secure_reset = is_sec_reset; 471 + ASSERT(is_sec_reset <= 0xFF); 472 + dmub->debug.is_dmcub_secure_reset = (uint8_t)is_sec_reset; 472 473 473 474 REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled); 474 - dmub->debug.is_traceport_en = is_traceport_enabled; 475 + ASSERT(is_traceport_enabled <= 0xFF); 476 + dmub->debug.is_traceport_en = (uint8_t)is_traceport_enabled; 475 477 476 478 REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled); 477 - dmub->debug.is_cw0_enabled = is_cw0_enabled; 479 + ASSERT(is_cw0_enabled <= 0xFF); 480 + dmub->debug.is_cw0_enabled = (uint8_t)is_cw0_enabled; 478 481 479 482 REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled); 480 - dmub->debug.is_cw6_enabled = is_cw6_enabled; 483 + ASSERT(is_cw6_enabled <= 0xFF); 484 + dmub->debug.is_cw6_enabled = (uint8_t)is_cw6_enabled; 481 485 }
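REG_GET() returns the extracted field in a uint32_t, while the dmub debug struct stores these flags as uint8_t; the ASSERT-then-cast added here (and repeated in the dmub_dcn30/31/32/35/401/42 files below) makes that narrowing explicit instead of implicit. Condensed into a hypothetical helper:

/* Hypothetical helper for the repeated ASSERT + cast pattern;
 * REG_GET() yields a uint32_t field value, the debug flags are one
 * byte wide.
 */
static uint8_t narrow_status_field(uint32_t field_value)
{
	ASSERT(field_value <= 0xFF);	/* flag must fit in one byte */
	return (uint8_t)field_value;
}

/* e.g.:
 *	REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
 *	dmub->debug.is_dmcub_enabled = narrow_status_field(is_dmub_enabled);
 */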
+1
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
··· 127 127 const struct dmub_window *cw6, 128 128 const struct dmub_window *region6) 129 129 { 130 + (void)region6; 130 131 union dmub_addr offset; 131 132 132 133 /* sienna_cichlid has hardwired virtual addressing for CW2-CW7 */
+16 -7
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
··· 195 195 const struct dmub_window *cw6, 196 196 const struct dmub_window *region6) 197 197 { 198 + (void)cw2; 199 + (void)region6; 198 200 union dmub_addr offset; 199 201 200 202 offset = cw3->offset; ··· 468 466 dmub->debug.outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE); 469 467 470 468 REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled); 471 - dmub->debug.is_dmcub_enabled = is_dmub_enabled; 469 + ASSERT(is_dmub_enabled <= 0xFF); 470 + dmub->debug.is_dmcub_enabled = (uint8_t)is_dmub_enabled; 472 471 473 472 REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait); 474 - dmub->debug.is_pwait = is_pwait; 473 + ASSERT(is_pwait <= 0xFF); 474 + dmub->debug.is_pwait = (uint8_t)is_pwait; 475 475 476 476 REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset); 477 - dmub->debug.is_dmcub_soft_reset = is_soft_reset; 477 + ASSERT(is_soft_reset <= 0xFF); 478 + dmub->debug.is_dmcub_soft_reset = (uint8_t)is_soft_reset; 478 479 479 480 REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset); 480 - dmub->debug.is_dmcub_secure_reset = is_sec_reset; 481 + ASSERT(is_sec_reset <= 0xFF); 482 + dmub->debug.is_dmcub_secure_reset = (uint8_t)is_sec_reset; 481 483 482 484 REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled); 483 - dmub->debug.is_traceport_en = is_traceport_enabled; 485 + ASSERT(is_traceport_enabled <= 0xFF); 486 + dmub->debug.is_traceport_en = (uint8_t)is_traceport_enabled; 484 487 485 488 REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled); 486 - dmub->debug.is_cw0_enabled = is_cw0_enabled; 489 + ASSERT(is_cw0_enabled <= 0xFF); 490 + dmub->debug.is_cw0_enabled = (uint8_t)is_cw0_enabled; 487 491 488 492 REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled); 489 - dmub->debug.is_cw6_enabled = is_cw6_enabled; 493 + ASSERT(is_cw6_enabled <= 0xFF); 494 + dmub->debug.is_cw6_enabled = (uint8_t)is_cw6_enabled; 490 495 } 491 496 492 497 bool dmub_dcn31_should_detect(struct dmub_srv *dmub)
+12 -5
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
··· 237 237 const struct dmub_window *cw6, 238 238 const struct dmub_window *region6) 239 239 { 240 + (void)cw2; 241 + (void)region6; 240 242 union dmub_addr offset; 241 243 242 244 offset = cw3->offset; ··· 488 486 dmub->debug.outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE); 489 487 490 488 REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled); 491 - dmub->debug.is_dmcub_enabled = is_dmub_enabled; 489 + ASSERT(is_dmub_enabled <= 0xFF); 490 + dmub->debug.is_dmcub_enabled = (uint8_t)is_dmub_enabled; 492 491 493 492 REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait); 494 - dmub->debug.is_pwait = is_pwait; 493 + ASSERT(is_pwait <= 0xFF); 494 + dmub->debug.is_pwait = (uint8_t)is_pwait; 495 495 496 496 REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset); 497 - dmub->debug.is_dmcub_soft_reset = is_soft_reset; 497 + ASSERT(is_soft_reset <= 0xFF); 498 + dmub->debug.is_dmcub_soft_reset = (uint8_t)is_soft_reset; 498 499 499 500 REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled); 500 - dmub->debug.is_traceport_en = is_traceport_enabled; 501 + ASSERT(is_traceport_enabled <= 0xFF); 502 + dmub->debug.is_traceport_en = (uint8_t)is_traceport_enabled; 501 503 502 504 REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled); 503 - dmub->debug.is_cw6_enabled = is_cw6_enabled; 505 + ASSERT(is_cw6_enabled <= 0xFF); 506 + dmub->debug.is_cw6_enabled = (uint8_t)is_cw6_enabled; 504 507 505 508 dmub->debug.gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0); 506 509 }
+12 -6
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
··· 222 222 const struct dmub_window *cw6, 223 223 const struct dmub_window *region6) 224 224 { 225 + (void)cw2; 225 226 union dmub_addr offset; 226 227 227 228 offset = cw3->offset; ··· 403 402 union dmub_fw_boot_options boot_options = {0}; 404 403 405 404 if (!dmub->dpia_supported) { 406 - dmub->dpia_supported = dmub_dcn35_get_fw_boot_option(dmub).bits.enable_dpia; 405 + dmub->dpia_supported = dmub_dcn35_get_fw_boot_option(dmub).bits.enable_dpia != 0; 407 406 } 408 407 409 408 boot_options.bits.z10_disable = params->disable_z10; ··· 509 508 dmub->debug.outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE); 510 509 511 510 REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled); 512 - dmub->debug.is_dmcub_enabled = is_dmub_enabled; 511 + ASSERT(is_dmub_enabled <= 0xFF); 512 + dmub->debug.is_dmcub_enabled = (uint8_t)is_dmub_enabled; 513 513 514 514 REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait); 515 - dmub->debug.is_pwait = is_pwait; 515 + ASSERT(is_pwait <= 0xFF); 516 + dmub->debug.is_pwait = (uint8_t)is_pwait; 516 517 517 518 REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset); 518 - dmub->debug.is_dmcub_soft_reset = is_soft_reset; 519 + ASSERT(is_soft_reset <= 0xFF); 520 + dmub->debug.is_dmcub_soft_reset = (uint8_t)is_soft_reset; 519 521 520 522 REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled); 521 - dmub->debug.is_traceport_en = is_traceport_enabled; 523 + ASSERT(is_traceport_enabled <= 0xFF); 524 + dmub->debug.is_traceport_en = (uint8_t)is_traceport_enabled; 522 525 523 526 REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled); 524 - dmub->debug.is_cw6_enabled = is_cw6_enabled; 527 + ASSERT(is_cw6_enabled <= 0xFF); 528 + dmub->debug.is_cw6_enabled = (uint8_t)is_cw6_enabled; 525 529 526 530 dmub->debug.gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0); 527 531 }
+15 -7
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
··· 213 213 const struct dmub_window *cw6, 214 214 const struct dmub_window *region6) 215 215 { 216 + (void)cw2; 216 217 union dmub_addr offset; 217 218 218 219 offset = cw3->offset; ··· 474 473 dmub->debug.outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE); 475 474 476 475 REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled); 477 - dmub->debug.is_dmcub_enabled = is_dmub_enabled; 476 + ASSERT(is_dmub_enabled <= 0xFF); 477 + dmub->debug.is_dmcub_enabled = (uint8_t)is_dmub_enabled; 478 478 479 479 REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait); 480 - dmub->debug.is_pwait = is_pwait; 480 + ASSERT(is_pwait <= 0xFF); 481 + dmub->debug.is_pwait = (uint8_t)is_pwait; 481 482 482 483 REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset); 483 - dmub->debug.is_dmcub_soft_reset = is_soft_reset; 484 + ASSERT(is_soft_reset <= 0xFF); 485 + dmub->debug.is_dmcub_soft_reset = (uint8_t)is_soft_reset; 484 486 485 487 REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset); 486 - dmub->debug.is_dmcub_secure_reset = is_sec_reset; 488 + ASSERT(is_sec_reset <= 0xFF); 489 + dmub->debug.is_dmcub_secure_reset = (uint8_t)is_sec_reset; 487 490 488 491 REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled); 489 - dmub->debug.is_traceport_en = is_traceport_enabled; 492 + ASSERT(is_traceport_enabled <= 0xFF); 493 + dmub->debug.is_traceport_en = (uint8_t)is_traceport_enabled; 490 494 491 495 REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled); 492 - dmub->debug.is_cw0_enabled = is_cw0_enabled; 496 + ASSERT(is_cw0_enabled <= 0xFF); 497 + dmub->debug.is_cw0_enabled = (uint8_t)is_cw0_enabled; 493 498 494 499 REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled); 495 - dmub->debug.is_cw6_enabled = is_cw6_enabled; 500 + ASSERT(is_cw6_enabled <= 0xFF); 501 + dmub->debug.is_cw6_enabled = (uint8_t)is_cw6_enabled; 496 502 497 503 dmub->debug.gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0); 498 504 }
+19 -13
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn42.c
··· 41 41 union dmub_fw_boot_options boot_options = {0}; 42 42 43 43 if (!dmub->dpia_supported) { 44 - dmub->dpia_supported = dmub_dcn42_get_fw_boot_option(dmub).bits.enable_dpia; 44 + dmub->dpia_supported = dmub_dcn42_get_fw_boot_option(dmub).bits.enable_dpia != 0; 45 45 } 46 46 47 47 boot_options.bits.z10_disable = params->disable_z10; ··· 229 229 const struct dmub_window *cw6, 230 230 const struct dmub_window *region6) 231 231 { 232 + (void)cw2; 232 233 union dmub_addr offset; 233 234 234 235 offset = cw3->offset; ··· 322 321 323 322 bool dmub_dcn42_is_supported(struct dmub_srv *dmub) 324 323 { 325 - uint32_t supported = 0; 326 - 327 - REG_GET(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE, &supported); 328 - 329 - return supported; 324 + // DCN without DMUB is not a supported configuration; safe to assume that it is always 325 + // present. 326 + return true; 330 327 } 331 328 332 329 union dmub_fw_boot_options dmub_dcn42_get_fw_boot_option(struct dmub_srv *dmub) ··· 677 678 dmub->debug.outbox1_size = REG_READ(DMCUB_OUTBOX1_SIZE); 678 679 679 680 REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled); 680 - dmub->debug.is_dmcub_enabled = is_dmub_enabled; 681 + ASSERT(is_dmub_enabled <= 0xFF); 682 + dmub->debug.is_dmcub_enabled = (uint8_t)is_dmub_enabled; 681 683 682 684 REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait); 683 - dmub->debug.is_pwait = is_pwait; 685 + ASSERT(is_pwait <= 0xFF); 686 + dmub->debug.is_pwait = (uint8_t)is_pwait; 684 687 685 688 REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset); 686 - dmub->debug.is_dmcub_soft_reset = is_soft_reset; 689 + ASSERT(is_soft_reset <= 0xFF); 690 + dmub->debug.is_dmcub_soft_reset = (uint8_t)is_soft_reset; 687 691 688 692 REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset); 689 - dmub->debug.is_dmcub_secure_reset = is_sec_reset; 693 + ASSERT(is_sec_reset <= 0xFF); 694 + dmub->debug.is_dmcub_secure_reset = (uint8_t)is_sec_reset; 690 695 691 696 REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled); 692 - dmub->debug.is_traceport_en = is_traceport_enabled; 697 + ASSERT(is_traceport_enabled <= 0xFF); 698 + dmub->debug.is_traceport_en = (uint8_t)is_traceport_enabled; 693 699 694 700 REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled); 695 - dmub->debug.is_cw0_enabled = is_cw0_enabled; 701 + ASSERT(is_cw0_enabled <= 0xFF); 702 + dmub->debug.is_cw0_enabled = (uint8_t)is_cw0_enabled; 696 703 697 704 REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled); 698 - dmub->debug.is_cw6_enabled = is_cw6_enabled; 705 + ASSERT(is_cw6_enabled <= 0xFF); 706 + dmub->debug.is_cw6_enabled = (uint8_t)is_cw6_enabled; 699 707 700 708 dmub->debug.gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0); 701 709 }
+134 -2
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn42.h
··· 34 34 /* DCN42 register definitions. */ 35 35 36 36 #define DMUB_DCN42_REGS() \ 37 - DMUB_DCN35_REGS() \ 37 + DMUB_SR(DMCUB_CNTL) \ 38 + DMUB_SR(DMCUB_CNTL2) \ 39 + DMUB_SR(DMCUB_SEC_CNTL) \ 40 + DMUB_SR(DMCUB_INBOX0_SIZE) \ 41 + DMUB_SR(DMCUB_INBOX0_RPTR) \ 42 + DMUB_SR(DMCUB_INBOX0_WPTR) \ 43 + DMUB_SR(DMCUB_INBOX1_BASE_ADDRESS) \ 44 + DMUB_SR(DMCUB_INBOX1_SIZE) \ 45 + DMUB_SR(DMCUB_INBOX1_RPTR) \ 46 + DMUB_SR(DMCUB_INBOX1_WPTR) \ 47 + DMUB_SR(DMCUB_OUTBOX0_BASE_ADDRESS) \ 48 + DMUB_SR(DMCUB_OUTBOX0_SIZE) \ 49 + DMUB_SR(DMCUB_OUTBOX0_RPTR) \ 50 + DMUB_SR(DMCUB_OUTBOX0_WPTR) \ 51 + DMUB_SR(DMCUB_OUTBOX1_BASE_ADDRESS) \ 52 + DMUB_SR(DMCUB_OUTBOX1_SIZE) \ 53 + DMUB_SR(DMCUB_OUTBOX1_RPTR) \ 54 + DMUB_SR(DMCUB_OUTBOX1_WPTR) \ 55 + DMUB_SR(DMCUB_REGION3_CW0_OFFSET) \ 56 + DMUB_SR(DMCUB_REGION3_CW1_OFFSET) \ 57 + DMUB_SR(DMCUB_REGION3_CW2_OFFSET) \ 58 + DMUB_SR(DMCUB_REGION3_CW3_OFFSET) \ 59 + DMUB_SR(DMCUB_REGION3_CW4_OFFSET) \ 60 + DMUB_SR(DMCUB_REGION3_CW5_OFFSET) \ 61 + DMUB_SR(DMCUB_REGION3_CW6_OFFSET) \ 62 + DMUB_SR(DMCUB_REGION3_CW7_OFFSET) \ 63 + DMUB_SR(DMCUB_REGION3_CW0_OFFSET_HIGH) \ 64 + DMUB_SR(DMCUB_REGION3_CW1_OFFSET_HIGH) \ 65 + DMUB_SR(DMCUB_REGION3_CW2_OFFSET_HIGH) \ 66 + DMUB_SR(DMCUB_REGION3_CW3_OFFSET_HIGH) \ 67 + DMUB_SR(DMCUB_REGION3_CW4_OFFSET_HIGH) \ 68 + DMUB_SR(DMCUB_REGION3_CW5_OFFSET_HIGH) \ 69 + DMUB_SR(DMCUB_REGION3_CW6_OFFSET_HIGH) \ 70 + DMUB_SR(DMCUB_REGION3_CW7_OFFSET_HIGH) \ 71 + DMUB_SR(DMCUB_REGION3_CW0_BASE_ADDRESS) \ 72 + DMUB_SR(DMCUB_REGION3_CW1_BASE_ADDRESS) \ 73 + DMUB_SR(DMCUB_REGION3_CW2_BASE_ADDRESS) \ 74 + DMUB_SR(DMCUB_REGION3_CW3_BASE_ADDRESS) \ 75 + DMUB_SR(DMCUB_REGION3_CW4_BASE_ADDRESS) \ 76 + DMUB_SR(DMCUB_REGION3_CW5_BASE_ADDRESS) \ 77 + DMUB_SR(DMCUB_REGION3_CW6_BASE_ADDRESS) \ 78 + DMUB_SR(DMCUB_REGION3_CW7_BASE_ADDRESS) \ 79 + DMUB_SR(DMCUB_REGION3_CW0_TOP_ADDRESS) \ 80 + DMUB_SR(DMCUB_REGION3_CW1_TOP_ADDRESS) \ 81 + DMUB_SR(DMCUB_REGION3_CW2_TOP_ADDRESS) \ 82 + DMUB_SR(DMCUB_REGION3_CW3_TOP_ADDRESS) \ 83 + DMUB_SR(DMCUB_REGION3_CW4_TOP_ADDRESS) \ 84 + DMUB_SR(DMCUB_REGION3_CW5_TOP_ADDRESS) \ 85 + DMUB_SR(DMCUB_REGION3_CW6_TOP_ADDRESS) \ 86 + DMUB_SR(DMCUB_REGION3_CW7_TOP_ADDRESS) \ 87 + DMUB_SR(DMCUB_REGION4_OFFSET) \ 88 + DMUB_SR(DMCUB_REGION4_OFFSET_HIGH) \ 89 + DMUB_SR(DMCUB_REGION4_TOP_ADDRESS) \ 90 + DMUB_SR(DMCUB_REGION5_OFFSET) \ 91 + DMUB_SR(DMCUB_REGION5_OFFSET_HIGH) \ 92 + DMUB_SR(DMCUB_REGION5_TOP_ADDRESS) \ 93 + DMUB_SR(DMCUB_REGION6_OFFSET) \ 94 + DMUB_SR(DMCUB_REGION6_OFFSET_HIGH) \ 95 + DMUB_SR(DMCUB_REGION6_TOP_ADDRESS) \ 96 + DMUB_SR(DMCUB_SCRATCH0) \ 97 + DMUB_SR(DMCUB_SCRATCH1) \ 98 + DMUB_SR(DMCUB_SCRATCH2) \ 99 + DMUB_SR(DMCUB_SCRATCH3) \ 100 + DMUB_SR(DMCUB_SCRATCH4) \ 101 + DMUB_SR(DMCUB_SCRATCH5) \ 102 + DMUB_SR(DMCUB_SCRATCH6) \ 103 + DMUB_SR(DMCUB_SCRATCH7) \ 104 + DMUB_SR(DMCUB_SCRATCH8) \ 105 + DMUB_SR(DMCUB_SCRATCH9) \ 106 + DMUB_SR(DMCUB_SCRATCH10) \ 107 + DMUB_SR(DMCUB_SCRATCH11) \ 108 + DMUB_SR(DMCUB_SCRATCH12) \ 109 + DMUB_SR(DMCUB_SCRATCH13) \ 110 + DMUB_SR(DMCUB_SCRATCH14) \ 111 + DMUB_SR(DMCUB_SCRATCH15) \ 112 + DMUB_SR(DMCUB_SCRATCH16) \ 113 + DMUB_SR(DMCUB_SCRATCH17) \ 114 + DMUB_SR(DMCUB_SCRATCH18) \ 115 + DMUB_SR(DMCUB_SCRATCH19) \ 116 + DMUB_SR(DMCUB_SCRATCH20) \ 117 + DMUB_SR(DMCUB_SCRATCH21) \ 118 + DMUB_SR(DMCUB_GPINT_DATAIN0) \ 119 + DMUB_SR(DMCUB_GPINT_DATAIN1) \ 120 + DMUB_SR(DMCUB_GPINT_DATAOUT) \ 121 + DMUB_SR(MMHUBBUB_SOFT_RESET) \ 122 + DMUB_SR(DCN_VM_FB_LOCATION_BASE) \ 123 + DMUB_SR(DCN_VM_FB_OFFSET) \ 124 + DMUB_SR(DMCUB_TIMER_CURRENT) \ 125 + DMUB_SR(DMCUB_INST_FETCH_FAULT_ADDR) 
\ 126 + DMUB_SR(DMCUB_UNDEFINED_ADDRESS_FAULT_ADDR) \ 127 + DMUB_SR(DMCUB_DATA_WRITE_FAULT_ADDR) \ 128 + DMUB_SR(DMCUB_REGION3_TMR_AXI_SPACE) \ 129 + DMUB_SR(DMCUB_INTERRUPT_ENABLE) \ 130 + DMUB_SR(DMCUB_INTERRUPT_ACK) \ 131 + DMUB_SR(DMU_CLK_CNTL) \ 38 132 DMUB_SR(DMCUB_INTERRUPT_STATUS) \ 39 133 DMUB_SR(DMCUB_REG_INBOX0_RDY) \ 40 134 DMUB_SR(DMCUB_REG_INBOX0_MSG0) \ ··· 153 59 DMUB_SR(HOST_INTERRUPT_CSR) 154 60 155 61 #define DMUB_DCN42_FIELDS() \ 156 - DMUB_DCN35_FIELDS() \ 62 + DMUB_SF(DMCUB_CNTL, DMCUB_ENABLE) \ 63 + DMUB_SF(DMCUB_CNTL, DMCUB_TRACEPORT_EN) \ 64 + DMUB_SF(DMCUB_CNTL2, DMCUB_SOFT_RESET) \ 65 + DMUB_SF(DMCUB_SEC_CNTL, DMCUB_SEC_RESET) \ 66 + DMUB_SF(DMCUB_SEC_CNTL, DMCUB_MEM_UNIT_ID) \ 67 + DMUB_SF(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS) \ 68 + DMUB_SF(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_TOP_ADDRESS) \ 69 + DMUB_SF(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE) \ 70 + DMUB_SF(DMCUB_REGION3_CW1_TOP_ADDRESS, DMCUB_REGION3_CW1_TOP_ADDRESS) \ 71 + DMUB_SF(DMCUB_REGION3_CW1_TOP_ADDRESS, DMCUB_REGION3_CW1_ENABLE) \ 72 + DMUB_SF(DMCUB_REGION3_CW2_TOP_ADDRESS, DMCUB_REGION3_CW2_TOP_ADDRESS) \ 73 + DMUB_SF(DMCUB_REGION3_CW2_TOP_ADDRESS, DMCUB_REGION3_CW2_ENABLE) \ 74 + DMUB_SF(DMCUB_REGION3_CW3_TOP_ADDRESS, DMCUB_REGION3_CW3_TOP_ADDRESS) \ 75 + DMUB_SF(DMCUB_REGION3_CW3_TOP_ADDRESS, DMCUB_REGION3_CW3_ENABLE) \ 76 + DMUB_SF(DMCUB_REGION3_CW4_TOP_ADDRESS, DMCUB_REGION3_CW4_TOP_ADDRESS) \ 77 + DMUB_SF(DMCUB_REGION3_CW4_TOP_ADDRESS, DMCUB_REGION3_CW4_ENABLE) \ 78 + DMUB_SF(DMCUB_REGION3_CW5_TOP_ADDRESS, DMCUB_REGION3_CW5_TOP_ADDRESS) \ 79 + DMUB_SF(DMCUB_REGION3_CW5_TOP_ADDRESS, DMCUB_REGION3_CW5_ENABLE) \ 80 + DMUB_SF(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_TOP_ADDRESS) \ 81 + DMUB_SF(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE) \ 82 + DMUB_SF(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_TOP_ADDRESS) \ 83 + DMUB_SF(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_ENABLE) \ 84 + DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_TOP_ADDRESS) \ 85 + DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_ENABLE) \ 86 + DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_TOP_ADDRESS) \ 87 + DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_ENABLE) \ 88 + DMUB_SF(DMCUB_REGION6_TOP_ADDRESS, DMCUB_REGION6_TOP_ADDRESS) \ 89 + DMUB_SF(DMCUB_REGION6_TOP_ADDRESS, DMCUB_REGION6_ENABLE) \ 90 + DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) \ 91 + DMUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE) \ 92 + DMUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET) \ 93 + DMUB_SF(DMCUB_INBOX0_WPTR, DMCUB_INBOX0_WPTR) \ 94 + DMUB_SF(DMCUB_REGION3_TMR_AXI_SPACE, DMCUB_REGION3_TMR_AXI_SPACE) \ 95 + DMUB_SF(DMCUB_INTERRUPT_ENABLE, DMCUB_GPINT_IH_INT_EN) \ 96 + DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK) \ 97 + DMUB_SF(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS) \ 98 + DMUB_SF(DMU_CLK_CNTL, LONO_DISPCLK_GATE_DISABLE) \ 99 + DMUB_SF(DMU_CLK_CNTL, LONO_SOCCLK_GATE_DISABLE) \ 100 + DMUB_SF(DMU_CLK_CNTL, LONO_DMCUBCLK_GATE_DISABLE) \ 157 101 DMUB_SF(DMCUB_INTERRUPT_STATUS, DMCUB_REG_OUTBOX0_RSP_INT_STAT) \ 158 102 DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_ACK) \ 159 103 DMUB_SF(HOST_INTERRUPT_CSR, HOST_REG_INBOX0_RSP_INT_STAT) \
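With this change the dcn42 header no longer chains DMUB_DCN35_REGS()/DMUB_DCN35_FIELDS() and instead carries its own complete lists. DMUB_SR()/DMUB_SF() are X-macros: the list is written once, and each consumer defines the macro before expanding the list to stamp out whatever it needs. An illustrative pair of consumers (not code from the patch):

/* Two hypothetical expansions of the DMUB_DCN42_REGS() X-macro list:
 * one offset field per register, plus a parallel name table.
 */
struct dcn42_reg_offsets {
#define DMUB_SR(reg) uint32_t reg;
	DMUB_DCN42_REGS()
#undef DMUB_SR
};

static const char * const dcn42_reg_names[] = {
#define DMUB_SR(reg) #reg,
	DMUB_DCN42_REGS()
#undef DMUB_SR
};

Decoupling from the dcn35 lists costs some repetition, but a later dcn35 register change can no longer silently alter the dcn42 layout.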
+3 -1
drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c
··· 45 45 uint32_t mask1, uint32_t field_value1, 46 46 va_list ap) 47 47 { 48 + (void)addr; 48 49 uint32_t shift, mask, field_value; 49 50 int i = 1; 50 51 ··· 58 57 mask = va_arg(ap, uint32_t); 59 58 field_value = va_arg(ap, uint32_t); 60 59 60 + ASSERT(shift <= 0xFF); 61 61 set_reg_field_value_masks(field_value_mask, field_value, mask, 62 - shift); 62 + (uint8_t)shift); 63 63 i++; 64 64 } 65 65 }
+4 -4
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
··· 1034 1034 static void dmub_srv_update_reg_inbox0_status(struct dmub_srv *dmub) 1035 1035 { 1036 1036 if (dmub->reg_inbox0.is_pending) { 1037 - dmub->reg_inbox0.is_pending = dmub->hw_funcs.read_reg_inbox0_rsp_int_status && 1038 - !dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub); 1037 + dmub->reg_inbox0.is_pending = (dmub->hw_funcs.read_reg_inbox0_rsp_int_status && 1038 + !dmub->hw_funcs.read_reg_inbox0_rsp_int_status(dmub)) != 0; 1039 1039 1040 1040 if (!dmub->reg_inbox0.is_pending) { 1041 1041 /* ack the rsp interrupt */ ··· 1320 1320 1321 1321 enum dmub_status dmub_srv_reg_cmd_execute(struct dmub_srv *dmub, union dmub_rb_cmd *cmd) 1322 1322 { 1323 - uint32_t num_pending = 0; 1323 + uint64_t num_pending = 0; 1324 1324 1325 1325 if (!dmub->hw_init) 1326 1326 return DMUB_STATUS_INVALID; ··· 1348 1348 1349 1349 dmub->reg_inbox0.num_submitted++; 1350 1350 dmub->reg_inbox0.is_pending = true; 1351 - dmub->reg_inbox0.is_multi_pending = cmd->cmd_common.header.multi_cmd_pending; 1351 + dmub->reg_inbox0.is_multi_pending = cmd->cmd_common.header.multi_cmd_pending != 0; 1352 1352 1353 1353 return DMUB_STATUS_OK; 1354 1354 }
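The "!= 0" added to the two flag assignments makes the boolean conversion explicit; it also guards the case where the destination is a narrow integer or bitfield rather than a bool, where a straight assignment of a wide value could truncate to zero. Widening num_pending to uint64_t moves the pending-command counter in the same defensive direction. A tiny illustration of the truncation hazard:

/* A wide status assigned to a narrow flag can truncate to 0 and read
 * as "false"; normalizing with != 0 preserves the truth value.
 */
static uint8_t flag_from_status(uint32_t status)
{
	return status != 0;	/* 0x100 -> 1, not (uint8_t)0x100 == 0 */
}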
+3 -2
drivers/gpu/drm/amd/pm/amdgpu_pm.c
··· 33 33 #include <linux/hwmon-sysfs.h> 34 34 #include <linux/nospec.h> 35 35 #include <linux/pm_runtime.h> 36 + #include <linux/string_choices.h> 36 37 #include <asm/processor.h> 37 38 38 39 #define MAX_NUM_OF_FEATURES_PER_SUBSET 8 ··· 1593 1592 1594 1593 return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n", 1595 1594 adev_to_drm(adev)->unique, 1596 - atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled", 1595 + str_enabled_disabled(atomic_read(&adev->throttling_logging_enabled)), 1597 1596 adev->throttling_logging_rs.interval / HZ + 1); 1598 1597 } 1599 1598 ··· 2236 2235 if (r) 2237 2236 return r; 2238 2237 2239 - return sysfs_emit(buf, "%s\n", npower ? "enabled" : "disabled"); 2238 + return sysfs_emit(buf, "%s\n", str_enabled_disabled(npower)); 2240 2239 } 2241 2240 2242 2241 /**
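str_enabled_disabled() comes from the newly included <linux/string_choices.h> and replaces the open-coded ternary. Its shape is simply:

/* Simplified form of the helper in <linux/string_choices.h>; the
 * header provides similar str_on_off(), str_yes_no(), ... variants.
 */
static inline const char *str_enabled_disabled(bool v)
{
	return v ? "enabled" : "disabled";
}

Centralizing the strings keeps sysfs and log output consistent across drivers and avoids typo'd copies of the two literals.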
+15
drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c
··· 104 104 PP_GFXOFF_MASK); 105 105 hwmgr->pp_table_version = PP_TABLE_V0; 106 106 hwmgr->od_enabled = false; 107 + switch (hwmgr->chip_id) { 108 + case CHIP_BONAIRE: 109 + /* R9 M380 in iMac 2015: SMU hangs when enabling MCLK DPM 110 + * R7 260X cards with old MC ucode: MCLK DPM is unstable 111 + */ 112 + if (adev->pdev->subsystem_vendor == 0x106B || 113 + adev->pdev->device == 0x6658) { 114 + dev_info(adev->dev, "disabling MCLK DPM on quirky ASIC\n"); 115 + adev->pm.pp_feature &= ~PP_MCLK_DPM_MASK; 116 + hwmgr->feature_mask &= ~PP_MCLK_DPM_MASK; 117 + } 118 + break; 119 + default: 120 + break; 121 + } 107 122 smu7_init_function_pointers(hwmgr); 108 123 break; 109 124 case AMDGPU_FAMILY_CZ:
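The quirk keys off PCI identity: Apple's subsystem vendor (0x106B) catches the iMac 2015 R9 M380, and device 0x6658 catches the affected R7 260X boards. If more entries accumulate, the conventional next step is a match table rather than a growing conditional; a hedged sketch (only the two known entries are real):

/* Hypothetical table form of the MCLK DPM quirk matching; the patch
 * open-codes these two matches inside the CHIP_BONAIRE case.
 */
struct smu7_mclk_quirk {
	u16 device;		/* 0 = match any device id */
	u16 subsystem_vendor;	/* 0 = match any subsystem vendor */
};

static const struct smu7_mclk_quirk mclk_dpm_quirks[] = {
	{ 0x6658, 0      },	/* R7 260X with old MC ucode */
	{ 0,      0x106B },	/* Apple: R9 M380 in iMac 2015 */
};

static bool smu7_mclk_dpm_is_quirked(const struct pci_dev *pdev)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(mclk_dpm_quirks); i++) {
		const struct smu7_mclk_quirk *q = &mclk_dpm_quirks[i];

		if ((!q->device || q->device == pdev->device) &&
		    (!q->subsystem_vendor ||
		     q->subsystem_vendor == pdev->subsystem_vendor))
			return true;
	}
	return false;
}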
+109 -14
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
··· 787 787 hwmgr->dyn_state.vddc_dependency_on_mclk; 788 788 struct phm_cac_leakage_table *std_voltage_table = 789 789 hwmgr->dyn_state.cac_leakage_table; 790 - uint32_t i; 790 + uint32_t i, clk; 791 791 792 792 PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL, 793 793 "SCLK dependency table is missing. This table is mandatory", return -EINVAL); ··· 804 804 data->dpm_table.sclk_table.count = 0; 805 805 806 806 for (i = 0; i < allowed_vdd_sclk_table->count; i++) { 807 + clk = min(allowed_vdd_sclk_table->entries[i].clk, data->sclk_cap); 808 + 807 809 if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value != 808 - allowed_vdd_sclk_table->entries[i].clk) { 810 + clk) { 809 811 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value = 810 - allowed_vdd_sclk_table->entries[i].clk; 812 + clk; 811 813 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = (i == 0) ? 1 : 0; 812 814 data->dpm_table.sclk_table.count++; 813 815 } ··· 2796 2794 if (tmp) 2797 2795 return -EINVAL; 2798 2796 2799 - tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl); 2797 + tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk); 2800 2798 if (tmp) 2801 2799 return -EINVAL; 2802 2800 2803 - tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk); 2801 + tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_display_clock); 2804 2802 if (tmp) 2805 2803 return -EINVAL; 2806 2804 ··· 2885 2883 2886 2884 static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) 2887 2885 { 2888 - kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); 2889 - hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; 2886 + kfree(hwmgr->dyn_state.vddc_dependency_on_display_clock); 2887 + hwmgr->dyn_state.vddc_dependency_on_display_clock = NULL; 2890 2888 kfree(hwmgr->backend); 2891 2889 hwmgr->backend = NULL; 2892 2890 ··· 2957 2955 return ret; 2958 2956 } 2959 2957 2958 + static int smu7_init_voltage_dependency_on_display_clock_table(struct pp_hwmgr *hwmgr) 2959 + { 2960 + struct phm_clock_voltage_dependency_table *table; 2961 + 2962 + if (!amdgpu_device_ip_get_ip_block(hwmgr->adev, AMD_IP_BLOCK_TYPE_DCE)) 2963 + return 0; 2964 + 2965 + table = kzalloc(struct_size(table, entries, 4), GFP_KERNEL); 2966 + if (!table) 2967 + return -ENOMEM; 2968 + 2969 + if (hwmgr->chip_id >= CHIP_POLARIS10) { 2970 + table->entries[0].clk = 38918; 2971 + table->entries[1].clk = 45900; 2972 + table->entries[2].clk = 66700; 2973 + table->entries[3].clk = 113200; 2974 + 2975 + table->entries[0].v = 700; 2976 + table->entries[1].v = 740; 2977 + table->entries[2].v = 800; 2978 + table->entries[3].v = 900; 2979 + } else { 2980 + if (hwmgr->chip_family == AMDGPU_FAMILY_CZ) { 2981 + table->entries[0].clk = 35200; 2982 + table->entries[1].clk = 35200; 2983 + table->entries[2].clk = 46700; 2984 + table->entries[3].clk = 64300; 2985 + } else { 2986 + table->entries[0].clk = 0; 2987 + table->entries[1].clk = 35200; 2988 + table->entries[2].clk = 54000; 2989 + table->entries[3].clk = 62500; 2990 + } 2991 + 2992 + table->entries[0].v = 0; 2993 + table->entries[1].v = 720; 2994 + table->entries[2].v = 810; 2995 + table->entries[3].v = 900; 2996 + } 2997 + 2998 + table->count = 4; 2999 + hwmgr->dyn_state.vddc_dependency_on_display_clock = table; 3000 + return 0; 3001 + } 3002 + 3003 + static void smu7_set_sclk_cap(struct pp_hwmgr *hwmgr) 3004 + { 3005 + struct amdgpu_device *adev = hwmgr->adev; 3006 + struct smu7_hwmgr *data = (struct smu7_hwmgr 
*)(hwmgr->backend); 3007 + 3008 + data->sclk_cap = 0xffffffff; 3009 + 3010 + if (hwmgr->od_enabled) 3011 + return; 3012 + 3013 + /* R9 390X board: last sclk dpm level is unstable, use lower sclk */ 3014 + if (adev->pdev->device == 0x67B0 && 3015 + adev->pdev->subsystem_vendor == 0x1043) 3016 + data->sclk_cap = 104000; /* 1040 MHz */ 3017 + 3018 + if (data->sclk_cap != 0xffffffff) 3019 + dev_info(adev->dev, "sclk cap: %u kHz on quirky ASIC\n", data->sclk_cap * 10); 3020 + } 3021 + 2960 3022 static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr) 2961 3023 { 2962 3024 struct amdgpu_device *adev = hwmgr->adev; ··· 3032 2966 return -ENOMEM; 3033 2967 3034 2968 hwmgr->backend = data; 2969 + smu7_set_sclk_cap(hwmgr); 3035 2970 smu7_patch_voltage_workaround(hwmgr); 3036 2971 smu7_init_dpm_defaults(hwmgr); 3037 2972 ··· 3050 2983 smu7_get_elb_voltages(hwmgr); 3051 2984 } 3052 2985 2986 + result = smu7_init_voltage_dependency_on_display_clock_table(hwmgr); 2987 + if (result) 2988 + goto fail; 2989 + 3053 2990 if (hwmgr->pp_table_version == PP_TABLE_V1) { 3054 2991 smu7_complete_dependency_tables(hwmgr); 3055 2992 smu7_set_private_data_based_on_pptable_v1(hwmgr); ··· 3061 2990 smu7_patch_dependency_tables_with_leakage(hwmgr); 3062 2991 smu7_set_private_data_based_on_pptable_v0(hwmgr); 3063 2992 } 3064 - 3065 - /* Initalize Dynamic State Adjustment Rule Settings */ 3066 - result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr); 3067 2993 3068 2994 if (result) 3069 2995 goto fail; ··· 3147 3079 return 0; 3148 3080 } 3149 3081 3082 + static uint32_t smu7_lookup_vddc_from_dispclk(struct pp_hwmgr *hwmgr) 3083 + { 3084 + const struct amd_pp_display_configuration *cfg = hwmgr->display_config; 3085 + const struct phm_clock_voltage_dependency_table *vddc_dep_on_dispclk = 3086 + hwmgr->dyn_state.vddc_dependency_on_display_clock; 3087 + uint32_t i; 3088 + 3089 + if (!vddc_dep_on_dispclk || !vddc_dep_on_dispclk->count || 3090 + !cfg || !cfg->num_display || !cfg->display_clk) 3091 + return 0; 3092 + 3093 + /* Start from 1 because ClocksStateUltraLow should not be used according to DC. */ 3094 + for (i = 1; i < vddc_dep_on_dispclk->count; ++i) 3095 + if (vddc_dep_on_dispclk->entries[i].clk >= cfg->display_clk) 3096 + return vddc_dep_on_dispclk->entries[i].v; 3097 + 3098 + return vddc_dep_on_dispclk->entries[vddc_dep_on_dispclk->count - 1].v; 3099 + } 3100 + 3101 + static void smu7_apply_minimum_dce_voltage_request(struct pp_hwmgr *hwmgr) 3102 + { 3103 + uint32_t req_vddc = smu7_lookup_vddc_from_dispclk(hwmgr); 3104 + 3105 + smum_send_msg_to_smc_with_parameter(hwmgr, 3106 + PPSMC_MSG_VddC_Request, 3107 + req_vddc * VOLTAGE_SCALE, 3108 + NULL); 3109 + } 3110 + 3150 3111 static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr) 3151 3112 { 3152 3113 struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); 3153 3114 3154 - if (hwmgr->pp_table_version == PP_TABLE_V1) 3155 - phm_apply_dal_min_voltage_request(hwmgr); 3156 - /* TO DO for v0 iceland and Ci*/ 3115 + smu7_apply_minimum_dce_voltage_request(hwmgr); 3157 3116 3158 3117 if (!data->sclk_dpm_key_disabled) { 3159 3118 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) ··· 3916 3821 3917 3822 /* Performance levels are arranged from low to high. */ 3918 3823 performance_level->memory_clock = memory_clock; 3919 - performance_level->engine_clock = engine_clock; 3824 + performance_level->engine_clock = min(engine_clock, data->sclk_cap); 3920 3825 3921 3826 pcie_gen_from_bios = visland_clk_info->ucPCIEGen; 3922 3827
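smu7_lookup_vddc_from_dispclk() walks the new four-entry table from index 1 (entry 0 is ClocksStateUltraLow, which DC says not to use) and returns the voltage of the first level whose clock covers the request, falling back to the top entry. Worked against the Polaris values added above, with clk and v in the same units the driver table stores:

/* Standalone rerun of the lookup against the Polaris table; the patch
 * scales the returned v by VOLTAGE_SCALE before sending
 * PPSMC_MSG_VddC_Request.
 */
static const struct { uint32_t clk, v; } polaris_disp_tbl[] = {
	{ 38918, 700 }, { 45900, 740 }, { 66700, 800 }, { 113200, 900 },
};

static uint32_t lookup_vddc(uint32_t display_clk)
{
	size_t i;

	for (i = 1; i < ARRAY_SIZE(polaris_disp_tbl); i++)	/* skip entry 0 */
		if (polaris_disp_tbl[i].clk >= display_clk)
			return polaris_disp_tbl[i].v;

	return polaris_disp_tbl[ARRAY_SIZE(polaris_disp_tbl) - 1].v;
}

/* lookup_vddc(50000): 45900 < 50000, 66700 >= 50000 -> returns 800. */

The companion sclk_cap change is the same defensive shape on the clock side: min(engine_clock, data->sclk_cap) clamps the unstable top SCLK level on the quirked R9 390X board instead of dropping the level entirely.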
+1
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.h
··· 234 234 uint32_t pcie_gen_cap; 235 235 uint32_t pcie_lane_cap; 236 236 uint32_t pcie_spc_cap; 237 + uint32_t sclk_cap; 237 238 struct smu7_leakage_voltage vddc_leakage; 238 239 struct smu7_leakage_voltage vddci_leakage; 239 240 struct smu7_leakage_voltage vddcgfx_leakage;
-83
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c
··· 484 484 return 0; 485 485 } 486 486 487 - /** 488 - * phm_initializa_dynamic_state_adjustment_rule_settings - Initialize Dynamic State Adjustment Rule Settings 489 - * 490 - * @hwmgr: the address of the powerplay hardware manager. 491 - */ 492 - int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr) 493 - { 494 - struct phm_clock_voltage_dependency_table *table_clk_vlt; 495 - struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); 496 - 497 - /* initialize vddc_dep_on_dal_pwrl table */ 498 - table_clk_vlt = kzalloc_flex(*table_clk_vlt, entries, 4); 499 - 500 - if (NULL == table_clk_vlt) { 501 - pr_err("Can not allocate space for vddc_dep_on_dal_pwrl! \n"); 502 - return -ENOMEM; 503 - } else { 504 - table_clk_vlt->count = 4; 505 - table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW; 506 - if (hwmgr->chip_id >= CHIP_POLARIS10 && 507 - hwmgr->chip_id <= CHIP_VEGAM) 508 - table_clk_vlt->entries[0].v = 700; 509 - else 510 - table_clk_vlt->entries[0].v = 0; 511 - table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW; 512 - if (hwmgr->chip_id >= CHIP_POLARIS10 && 513 - hwmgr->chip_id <= CHIP_VEGAM) 514 - table_clk_vlt->entries[1].v = 740; 515 - else 516 - table_clk_vlt->entries[1].v = 720; 517 - table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL; 518 - if (hwmgr->chip_id >= CHIP_POLARIS10 && 519 - hwmgr->chip_id <= CHIP_VEGAM) 520 - table_clk_vlt->entries[2].v = 800; 521 - else 522 - table_clk_vlt->entries[2].v = 810; 523 - table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE; 524 - table_clk_vlt->entries[3].v = 900; 525 - if (pptable_info != NULL) 526 - pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt; 527 - hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt; 528 - } 529 - 530 - return 0; 531 - } 532 - 533 487 uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask) 534 488 { 535 489 uint32_t level = 0; ··· 492 538 level++; 493 539 494 540 return level; 495 - } 496 - 497 - void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr) 498 - { 499 - struct phm_ppt_v1_information *table_info = 500 - (struct phm_ppt_v1_information *)hwmgr->pptable; 501 - struct phm_clock_voltage_dependency_table *table = 502 - table_info->vddc_dep_on_dal_pwrl; 503 - struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table; 504 - enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level; 505 - uint32_t req_vddc = 0, req_volt, i; 506 - 507 - if (!table || table->count <= 0 508 - || dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW 509 - || dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE) 510 - return; 511 - 512 - for (i = 0; i < table->count; i++) { 513 - if (dal_power_level == table->entries[i].clk) { 514 - req_vddc = table->entries[i].v; 515 - break; 516 - } 517 - } 518 - 519 - vddc_table = table_info->vdd_dep_on_sclk; 520 - for (i = 0; i < vddc_table->count; i++) { 521 - if (req_vddc <= vddc_table->entries[i].vddc) { 522 - req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE); 523 - smum_send_msg_to_smc_with_parameter(hwmgr, 524 - PPSMC_MSG_VddC_Request, 525 - req_volt, 526 - NULL); 527 - return; 528 - } 529 - } 530 - pr_err("DAL requested level can not" 531 - " found a available voltage in VDDC DPM Table \n"); 532 541 } 533 542 534 543 int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
-2
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h
··· 87 87 extern int phm_find_boot_level(void *table, uint32_t value, uint32_t *boot_level); 88 88 extern int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, phm_ppt_v1_voltage_lookup_table *lookup_table, 89 89 uint16_t virtual_voltage_id, int32_t *sclk); 90 - extern int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr); 91 90 extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask); 92 - extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr); 93 91 94 92 extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, 95 93 uint32_t sclk, uint16_t id, uint16_t *voltage);
+1 -1
drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
··· 631 631 struct phm_clock_voltage_dependency_table *vddci_dependency_on_mclk; 632 632 struct phm_clock_voltage_dependency_table *vddc_dependency_on_mclk; 633 633 struct phm_clock_voltage_dependency_table *mvdd_dependency_on_mclk; 634 + struct phm_clock_voltage_dependency_table *vddc_dependency_on_display_clock; 634 635 struct phm_clock_voltage_dependency_table *vddc_dep_on_dal_pwrl; 635 636 struct phm_clock_array *valid_sclk_values; 636 637 struct phm_clock_array *valid_mclk_values; ··· 773 772 const struct pp_smumgr_func *smumgr_funcs; 774 773 bool is_kicker; 775 774 776 - enum PP_DAL_POWERLEVEL dal_power_level; 777 775 struct phm_dynamic_state_info dyn_state; 778 776 const struct pp_hwmgr_func *hwmgr_func; 779 777 const struct pp_table_func *pptable_func;
+11 -4
drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
··· 245 245 smu_data->power_tune_defaults = &defaults_hawaii_pro; 246 246 break; 247 247 case 0x67B8: 248 - case 0x66B0: 248 + case 0x67B0: 249 249 smu_data->power_tune_defaults = &defaults_hawaii_xt; 250 250 break; 251 251 case 0x6640: ··· 543 543 { 544 544 struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend); 545 545 const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults; 546 - uint32_t temp; 547 546 548 547 if (ci_read_smc_sram_dword(hwmgr, 549 548 fuse_table_offset + 550 549 offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl), 551 - (uint32_t *)&temp, SMC_RAM_END)) 550 + (uint32_t *)&smu_data->power_tune_table.TdcWaterfallCtl, SMC_RAM_END)) 552 551 PP_ASSERT_WITH_CODE(false, 553 552 "Attempt to read PmFuses.DW6 (SviLoadLineEn) from SMC Failed!", 554 553 return -EINVAL); ··· 1216 1217 } 1217 1218 1218 1219 memory_level->EnabledForThrottle = 1; 1219 - memory_level->EnabledForActivity = 1; 1220 + memory_level->EnabledForActivity = 0; 1220 1221 memory_level->UpH = data->current_profile_setting.mclk_up_hyst; 1221 1222 memory_level->DownH = data->current_profile_setting.mclk_down_hyst; 1222 1223 memory_level->VoltageDownH = 0; ··· 1319 1320 &(smu_data->smc_state_table.MemoryLevel[i])); 1320 1321 if (0 != result) 1321 1322 return result; 1323 + } 1324 + 1325 + if (data->mclk_dpm_key_disabled && dpm_table->mclk_table.count) { 1326 + /* Populate the table with the highest MCLK level when MCLK DPM is disabled */ 1327 + for (i = 0; i < dpm_table->mclk_table.count - 1; i++) { 1328 + levels[i] = levels[dpm_table->mclk_table.count - 1]; 1329 + levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH; 1330 + } 1322 1331 } 1323 1332 1324 1333 smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
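The last hunk covers the case the new Bonaire quirk can now create: with PP_MCLK_DPM_MASK cleared, mclk_dpm_key_disabled is set, yet the SMU still consumes a fully populated memory-level table. Every slot below the top is therefore overwritten with the highest level so the firmware can never select a low, unstable memory clock:

/* With a 3-entry table, the fixup turns
 *	MemoryLevel = { low, mid, high }
 * into
 *	MemoryLevel = { high, high, high },
 * pinning each copied-down clone to the HIGH display watermark so a
 * DPM-disabled SMU always runs the top MCLK.
 */
for (i = 0; i < dpm_table->mclk_table.count - 1; i++) {
	levels[i] = levels[dpm_table->mclk_table.count - 1];
	levels[i].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
}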
-14
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
··· 25 25 26 26 #include "amdgpu_smu.h" 27 27 28 - #define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF 29 - #define SMU11_DRIVER_IF_VERSION_ARCT 0x17 30 - #define SMU11_DRIVER_IF_VERSION_NV10 0x37 31 - #define SMU11_DRIVER_IF_VERSION_NV12 0x38 32 - #define SMU11_DRIVER_IF_VERSION_NV14 0x38 33 - #define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x40 34 - #define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xE 35 - #define SMU11_DRIVER_IF_VERSION_VANGOGH 0x03 36 - #define SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish 0xF 37 - #define SMU11_DRIVER_IF_VERSION_Beige_Goby 0xD 38 - #define SMU11_DRIVER_IF_VERSION_Cyan_Skillfish 0x8 39 - 40 28 /* MP Apertures */ 41 29 #define MP0_Public 0x03800000 42 30 #define MP0_SRAM 0x03900000 ··· 135 147 int smu_v11_0_setup_pptable(struct smu_context *smu); 136 148 137 149 int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu); 138 - 139 - int smu_v11_0_check_fw_version(struct smu_context *smu); 140 150 141 151 int smu_v11_0_set_driver_table_location(struct smu_context *smu); 142 152
-2
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h
··· 35 35 36 36 int smu_v12_0_check_fw_status(struct smu_context *smu); 37 37 38 - int smu_v12_0_check_fw_version(struct smu_context *smu); 39 - 40 38 int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate); 41 39 42 40 int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate);
-7
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
··· 25 25 26 26 #include "amdgpu_smu.h" 27 27 28 - #define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF 29 - #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7 30 - #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6 31 - #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x2E 32 - 33 28 #define FEATURE_MASK(feature) (1ULL << feature) 34 29 35 30 /* MP Apertures */ ··· 118 123 int smu_v14_0_setup_pptable(struct smu_context *smu); 119 124 120 125 int smu_v14_0_get_vbios_bootup_values(struct smu_context *smu); 121 - 122 - int smu_v14_0_check_fw_version(struct smu_context *smu); 123 126 124 127 int smu_v14_0_set_driver_table_location(struct smu_context *smu); 125 128
+4 -1
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
··· 65 65 #define SMU_FEATURES_HIGH_MASK 0xFFFFFFFF00000000 66 66 #define SMU_FEATURES_HIGH_SHIFT 32 67 67 68 + #define SMU11_DRIVER_IF_VERSION_ARCT 0x17 69 + 68 70 static const struct smu_feature_bits arcturus_dpm_features = { 69 71 .bits = { SMU_FEATURE_BIT_INIT(FEATURE_DPM_PREFETCHER_BIT), 70 72 SMU_FEATURE_BIT_INIT(FEATURE_DPM_GFXCLK_BIT), ··· 1907 1905 /* pptable related */ 1908 1906 .setup_pptable = arcturus_setup_pptable, 1909 1907 .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values, 1910 - .check_fw_version = smu_v11_0_check_fw_version, 1908 + .check_fw_version = smu_cmn_check_fw_version, 1911 1909 .write_pptable = smu_cmn_write_pptable, 1912 1910 .set_driver_table_location = smu_v11_0_set_driver_table_location, 1913 1911 .set_tool_table_location = smu_v11_0_set_tool_table_location, ··· 1960 1958 smu->table_map = arcturus_table_map; 1961 1959 smu->pwr_src_map = arcturus_pwr_src_map; 1962 1960 smu->workload_map = arcturus_workload_map; 1961 + smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT; 1963 1962 smu_v11_0_init_msg_ctl(smu, arcturus_message_map); 1964 1963 }
+2 -1
drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
··· 582 582 static const struct pptable_funcs cyan_skillfish_ppt_funcs = { 583 583 584 584 .check_fw_status = smu_v11_0_check_fw_status, 585 - .check_fw_version = smu_v11_0_check_fw_version, 585 + .check_fw_version = smu_cmn_check_fw_version, 586 586 .init_power = smu_v11_0_init_power, 587 587 .fini_power = smu_v11_0_fini_power, 588 588 .init_smc_tables = cyan_skillfish_init_smc_tables, ··· 605 605 smu->ppt_funcs = &cyan_skillfish_ppt_funcs; 606 606 smu->table_map = cyan_skillfish_table_map; 607 607 smu->is_apu = true; 608 + smu->smc_driver_if_version = MP1_DRIVER_IF_VERSION; 608 609 smu_v11_0_init_msg_ctl(smu, cyan_skillfish_message_map); 609 610 }
+20 -1
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
··· 73 73 74 74 #define SMU_11_0_GFX_BUSY_THRESHOLD 15 75 75 76 + #define SMU11_DRIVER_IF_VERSION_NV10 0x37 77 + #define SMU11_DRIVER_IF_VERSION_NV12 0x38 78 + #define SMU11_DRIVER_IF_VERSION_NV14 0x38 79 + 76 80 static struct cmn2asic_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = { 77 81 MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), 78 82 MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), ··· 3312 3308 .check_fw_status = smu_v11_0_check_fw_status, 3313 3309 .setup_pptable = navi10_setup_pptable, 3314 3310 .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values, 3315 - .check_fw_version = smu_v11_0_check_fw_version, 3311 + .check_fw_version = smu_cmn_check_fw_version, 3316 3312 .write_pptable = smu_cmn_write_pptable, 3317 3313 .set_driver_table_location = smu_v11_0_set_driver_table_location, 3318 3314 .set_tool_table_location = smu_v11_0_set_tool_table_location, ··· 3365 3361 3366 3362 void navi10_set_ppt_funcs(struct smu_context *smu) 3367 3363 { 3364 + struct amdgpu_device *adev = smu->adev; 3365 + 3368 3366 smu->ppt_funcs = &navi10_ppt_funcs; 3369 3367 smu->clock_map = navi10_clk_map; 3370 3368 smu->feature_map = navi10_feature_mask_map; 3371 3369 smu->table_map = navi10_table_map; 3372 3370 smu->pwr_src_map = navi10_pwr_src_map; 3373 3371 smu->workload_map = navi10_workload_map; 3372 + 3373 + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 3374 + case IP_VERSION(11, 0, 0): 3375 + smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10; 3376 + break; 3377 + case IP_VERSION(11, 0, 9): 3378 + smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV12; 3379 + break; 3380 + case IP_VERSION(11, 0, 5): 3381 + smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV14; 3382 + break; 3383 + } 3384 + 3374 3385 smu_v11_0_init_msg_ctl(smu, navi10_message_map); 3375 3386 }
+24 -1
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
··· 3119 3119 .check_fw_status = smu_v11_0_check_fw_status, 3120 3120 .setup_pptable = sienna_cichlid_setup_pptable, 3121 3121 .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values, 3122 - .check_fw_version = smu_v11_0_check_fw_version, 3122 + .check_fw_version = smu_cmn_check_fw_version, 3123 3123 .write_pptable = smu_cmn_write_pptable, 3124 3124 .set_driver_table_location = smu_v11_0_set_driver_table_location, 3125 3125 .set_tool_table_location = smu_v11_0_set_tool_table_location, ··· 3176 3176 .mode2_reset = sienna_cichlid_mode2_reset, 3177 3177 }; 3178 3178 3179 + #define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x40 3180 + #define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xE 3181 + #define SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish 0xF 3182 + #define SMU11_DRIVER_IF_VERSION_Beige_Goby 0xD 3183 + 3179 3184 void sienna_cichlid_set_ppt_funcs(struct smu_context *smu) 3180 3185 { 3186 + struct amdgpu_device *adev = smu->adev; 3187 + 3181 3188 smu->ppt_funcs = &sienna_cichlid_ppt_funcs; 3182 3189 smu->clock_map = sienna_cichlid_clk_map; 3183 3190 smu->feature_map = sienna_cichlid_feature_mask_map; 3184 3191 smu->table_map = sienna_cichlid_table_map; 3185 3192 smu->pwr_src_map = sienna_cichlid_pwr_src_map; 3186 3193 smu->workload_map = sienna_cichlid_workload_map; 3194 + 3195 + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 3196 + case IP_VERSION(11, 0, 7): 3197 + smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Sienna_Cichlid; 3198 + break; 3199 + case IP_VERSION(11, 0, 11): 3200 + smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Navy_Flounder; 3201 + break; 3202 + case IP_VERSION(11, 0, 12): 3203 + smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish; 3204 + break; 3205 + case IP_VERSION(11, 0, 13): 3206 + smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Beige_Goby; 3207 + break; 3208 + } 3209 + 3187 3210 smu_v11_0_init_msg_ctl(smu, sienna_cichlid_message_map); 3188 3211 }
-75
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
··· 192 192 return -EIO; 193 193 } 194 194 195 - int smu_v11_0_check_fw_version(struct smu_context *smu) 196 - { 197 - struct amdgpu_device *adev = smu->adev; 198 - uint32_t if_version = 0xff, smu_version = 0xff; 199 - uint8_t smu_program, smu_major, smu_minor, smu_debug; 200 - int ret = 0; 201 - 202 - ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version); 203 - if (ret) 204 - return ret; 205 - 206 - smu_program = (smu_version >> 24) & 0xff; 207 - smu_major = (smu_version >> 16) & 0xff; 208 - smu_minor = (smu_version >> 8) & 0xff; 209 - smu_debug = (smu_version >> 0) & 0xff; 210 - if (smu->is_apu) 211 - adev->pm.fw_version = smu_version; 212 - 213 - switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 214 - case IP_VERSION(11, 0, 0): 215 - smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10; 216 - break; 217 - case IP_VERSION(11, 0, 9): 218 - smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV12; 219 - break; 220 - case IP_VERSION(11, 0, 5): 221 - smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV14; 222 - break; 223 - case IP_VERSION(11, 0, 7): 224 - smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Sienna_Cichlid; 225 - break; 226 - case IP_VERSION(11, 0, 11): 227 - smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Navy_Flounder; 228 - break; 229 - case IP_VERSION(11, 5, 0): 230 - case IP_VERSION(11, 5, 2): 231 - smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VANGOGH; 232 - break; 233 - case IP_VERSION(11, 0, 12): 234 - smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish; 235 - break; 236 - case IP_VERSION(11, 0, 13): 237 - smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Beige_Goby; 238 - break; 239 - case IP_VERSION(11, 0, 8): 240 - smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Cyan_Skillfish; 241 - break; 242 - case IP_VERSION(11, 0, 2): 243 - smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT; 244 - break; 245 - default: 246 - dev_err(smu->adev->dev, "smu unsupported IP version: 0x%x.\n", 247 - amdgpu_ip_version(adev, MP1_HWIP, 0)); 248 - smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV; 249 - break; 250 - } 251 - 252 - /* 253 - * 1. if_version mismatch is not critical as our fw is designed 254 - * to be backward compatible. 255 - * 2. New fw usually brings some optimizations. But that's visible 256 - * only on the paired driver. 257 - * Considering above, we just leave user a verbal message instead 258 - * of halt driver loading. 259 - */ 260 - if (if_version != smu->smc_driver_if_version) { 261 - dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, " 262 - "smu fw program = %d, version = 0x%08x (%d.%d.%d)\n", 263 - smu->smc_driver_if_version, if_version, 264 - smu_program, smu_version, smu_major, smu_minor, smu_debug); 265 - } 266 - 267 - return ret; 268 - } 269 - 270 195 static int smu_v11_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size) 271 196 { 272 197 struct amdgpu_device *adev = smu->adev;
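All three removed check_fw_version() copies (here and in smu_v12_0.c / smu_v14_0.c below) decoded the firmware version word identically before comparing interface versions, which is what makes the consolidation into smu_cmn_check_fw_version() possible. The shared decoding from the removed code, wrapped here in an illustrative helper:

/* SMU firmware version word layout used by the removed copies:
 * program.major.minor.debug, one byte each from the top down.
 * smu_log_fw_version() is an illustrative name, not driver code.
 */
static void smu_log_fw_version(struct smu_context *smu, uint32_t smu_version)
{
	uint8_t program = (smu_version >> 24) & 0xff;
	uint8_t major   = (smu_version >> 16) & 0xff;
	uint8_t minor   = (smu_version >>  8) & 0xff;
	uint8_t debug   = (smu_version >>  0) & 0xff;

	dev_info(smu->adev->dev,
		 "smu fw program = %d, version = 0x%08x (%d.%d.%d)\n",
		 program, smu_version, major, minor, debug);
}

As the removed comment notes, an interface-version mismatch is deliberately non-fatal: the firmware is designed to be backward compatible, so the driver logs the discrepancy instead of refusing to load. The per-ASIC smc_driver_if_version values move out of the common checker and into each set_ppt_funcs(), as the ppt.c hunks above and below show.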
+2 -1
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
··· 2511 2511 static const struct pptable_funcs vangogh_ppt_funcs = { 2512 2512 2513 2513 .check_fw_status = smu_v11_0_check_fw_status, 2514 - .check_fw_version = smu_v11_0_check_fw_version, 2514 + .check_fw_version = smu_cmn_check_fw_version, 2515 2515 .init_smc_tables = vangogh_init_smc_tables, 2516 2516 .fini_smc_tables = smu_v11_0_fini_smc_tables, 2517 2517 .init_power = smu_v11_0_init_power, ··· 2561 2561 smu->table_map = vangogh_table_map; 2562 2562 smu->workload_map = vangogh_workload_map; 2563 2563 smu->is_apu = true; 2564 + smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION; 2564 2565 smu_v11_0_init_msg_ctl(smu, vangogh_message_map); 2565 2566 }
+1 -1
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
··· 1457 1457 .get_power_profile_mode = renoir_get_power_profile_mode, 1458 1458 .read_sensor = renoir_read_sensor, 1459 1459 .check_fw_status = smu_v12_0_check_fw_status, 1460 - .check_fw_version = smu_v12_0_check_fw_version, 1460 + .check_fw_version = smu_cmn_check_fw_version, 1461 1461 .powergate_sdma = smu_v12_0_powergate_sdma, 1462 1462 .set_gfx_cgpg = smu_v12_0_set_gfx_cgpg, 1463 1463 .gfx_off_control = smu_v12_0_gfx_off_control,
-36
drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
··· 70 70 return -EIO; 71 71 } 72 72 73 - int smu_v12_0_check_fw_version(struct smu_context *smu) 74 - { 75 - struct amdgpu_device *adev = smu->adev; 76 - uint32_t if_version = 0xff, smu_version = 0xff; 77 - uint8_t smu_program, smu_major, smu_minor, smu_debug; 78 - int ret = 0; 79 - 80 - ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version); 81 - if (ret) 82 - return ret; 83 - 84 - smu_program = (smu_version >> 24) & 0xff; 85 - smu_major = (smu_version >> 16) & 0xff; 86 - smu_minor = (smu_version >> 8) & 0xff; 87 - smu_debug = (smu_version >> 0) & 0xff; 88 - if (smu->is_apu) 89 - adev->pm.fw_version = smu_version; 90 - 91 - /* 92 - * 1. if_version mismatch is not critical as our fw is designed 93 - * to be backward compatible. 94 - * 2. New fw usually brings some optimizations. But that's visible 95 - * only on the paired driver. 96 - * Considering above, we just leave user a verbal message instead 97 - * of halt driver loading. 98 - */ 99 - if (if_version != smu->smc_driver_if_version) { 100 - dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, " 101 - "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n", 102 - smu->smc_driver_if_version, if_version, 103 - smu_program, smu_version, smu_major, smu_minor, smu_debug); 104 - } 105 - 106 - return ret; 107 - } 108 - 109 73 int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate) 110 74 { 111 75 if (!smu->is_apu)
-60
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
··· 229 229 return -EIO; 230 230 } 231 231 232 - int smu_v14_0_check_fw_version(struct smu_context *smu) 233 - { 234 - struct amdgpu_device *adev = smu->adev; 235 - uint32_t if_version = 0xff, smu_version = 0xff; 236 - uint8_t smu_program, smu_major, smu_minor, smu_debug; 237 - int ret = 0; 238 - 239 - ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version); 240 - if (ret) 241 - return ret; 242 - 243 - smu_program = (smu_version >> 24) & 0xff; 244 - smu_major = (smu_version >> 16) & 0xff; 245 - smu_minor = (smu_version >> 8) & 0xff; 246 - smu_debug = (smu_version >> 0) & 0xff; 247 - if (smu->is_apu) 248 - adev->pm.fw_version = smu_version; 249 - 250 - switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 251 - case IP_VERSION(14, 0, 0): 252 - case IP_VERSION(14, 0, 4): 253 - case IP_VERSION(14, 0, 5): 254 - smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0; 255 - break; 256 - case IP_VERSION(14, 0, 1): 257 - smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_1; 258 - break; 259 - case IP_VERSION(14, 0, 2): 260 - case IP_VERSION(14, 0, 3): 261 - smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2; 262 - break; 263 - default: 264 - dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n", 265 - amdgpu_ip_version(adev, MP1_HWIP, 0)); 266 - smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_INV; 267 - break; 268 - } 269 - 270 - if (adev->pm.fw) 271 - dev_dbg(smu->adev->dev, "smu fw reported program %d, version = 0x%08x (%d.%d.%d)\n", 272 - smu_program, smu_version, smu_major, smu_minor, smu_debug); 273 - 274 - /* 275 - * 1. if_version mismatch is not critical as our fw is designed 276 - * to be backward compatible. 277 - * 2. New fw usually brings some optimizations. But that's visible 278 - * only on the paired driver. 279 - * Considering above, we just leave user a verbal message instead 280 - * of halt driver loading. 281 - */ 282 - if (if_version != smu->smc_driver_if_version) { 283 - dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, " 284 - "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n", 285 - smu->smc_driver_if_version, if_version, 286 - smu_program, smu_version, smu_major, smu_minor, smu_debug); 287 - } 288 - 289 - return ret; 290 - } 291 - 292 232 static int smu_v14_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size) 293 233 { 294 234 struct amdgpu_device *adev = smu->adev;
+17 -1
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
··· 65 65 66 66 #define SMU_MALL_PG_CONFIG_DEFAULT SMU_MALL_PG_CONFIG_DRIVER_CONTROL_ALWAYS_ON 67 67 68 + #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7 69 + #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6 70 + 68 71 #define SMU_14_0_0_UMD_PSTATE_GFXCLK 700 69 72 #define SMU_14_0_0_UMD_PSTATE_SOCCLK 678 70 73 #define SMU_14_0_0_UMD_PSTATE_FCLK 1800 ··· 1702 1699 1703 1700 static const struct pptable_funcs smu_v14_0_0_ppt_funcs = { 1704 1701 .check_fw_status = smu_v14_0_check_fw_status, 1705 - .check_fw_version = smu_v14_0_check_fw_version, 1702 + .check_fw_version = smu_cmn_check_fw_version, 1706 1703 .init_smc_tables = smu_v14_0_0_init_smc_tables, 1707 1704 .fini_smc_tables = smu_v14_0_0_fini_smc_tables, 1708 1705 .get_vbios_bootup_values = smu_v14_0_get_vbios_bootup_values, ··· 1753 1750 1754 1751 void smu_v14_0_0_set_ppt_funcs(struct smu_context *smu) 1755 1752 { 1753 + struct amdgpu_device *adev = smu->adev; 1754 + 1756 1755 smu->ppt_funcs = &smu_v14_0_0_ppt_funcs; 1757 1756 smu->feature_map = smu_v14_0_0_feature_mask_map; 1758 1757 smu->table_map = smu_v14_0_0_table_map; 1759 1758 smu->is_apu = true; 1759 + 1760 + switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 1761 + case IP_VERSION(14, 0, 0): 1762 + case IP_VERSION(14, 0, 4): 1763 + case IP_VERSION(14, 0, 5): 1764 + smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0; 1765 + break; 1766 + case IP_VERSION(14, 0, 1): 1767 + smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_1; 1768 + break; 1769 + } 1760 1770 1761 1771 smu_v14_0_0_init_msg_ctl(smu); 1762 1772 }
+4 -1
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
··· 68 68 SMU_FEATURE_BIT_INIT(FEATURE_DPM_FCLK_BIT) } 69 69 }; 70 70 71 + #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x2E 72 + 71 73 #define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000 72 74 #define DEBUGSMC_MSG_Mode1Reset 2 73 75 #define LINK_SPEED_MAX 3 ··· 2800 2798 .fini_power = smu_v14_0_fini_power, 2801 2799 .check_fw_status = smu_v14_0_check_fw_status, 2802 2800 .setup_pptable = smu_v14_0_2_setup_pptable, 2803 - .check_fw_version = smu_v14_0_check_fw_version, 2801 + .check_fw_version = smu_cmn_check_fw_version, 2804 2802 .set_driver_table_location = smu_v14_0_set_driver_table_location, 2805 2803 .system_features_control = smu_v14_0_system_features_control, 2806 2804 .set_allowed_mask = smu_v14_0_set_allowed_mask, ··· 2865 2863 smu->table_map = smu_v14_0_2_table_map; 2866 2864 smu->pwr_src_map = smu_v14_0_2_pwr_src_map; 2867 2865 smu->workload_map = smu_v14_0_2_workload_map; 2866 + smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2; 2868 2867 smu_v14_0_2_init_msg_ctl(smu); 2869 2868 }