Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

drm/amdgpu: add support to query vram info from firmware

add support to query vram info from firmware

v2: change APU vram type, add multi-aid check
v3: separate vram info query function into 3 parts and
call them in a helper func when requirements
are met.
v4: calculate vram_width for v9.x

Signed-off-by: Gangliang Xie <ganglxie@amd.com>
Reviewed-by: Lijo Lazar <lijo.lazar@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Gangliang Xie and committed by
Alex Deucher
02c3060e 7b82e92d

+315 -243
+248 -217
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
··· 373 373 return -ENODEV; 374 374 } 375 375 376 - int 377 - amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev, 376 + int amdgpu_atomfirmware_get_integrated_system_info(struct amdgpu_device *adev, 377 + int *vram_width, int *vram_type, 378 + int *vram_vendor) 379 + { 380 + struct amdgpu_mode_info *mode_info = &adev->mode_info; 381 + int index; 382 + u16 data_offset, size; 383 + union igp_info *igp_info; 384 + u8 frev, crev; 385 + u8 mem_type; 386 + u32 mem_channel_number; 387 + u32 mem_channel_width; 388 + 389 + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 390 + integratedsysteminfo); 391 + if (amdgpu_atom_parse_data_header(mode_info->atom_context, 392 + index, &size, 393 + &frev, &crev, &data_offset)) { 394 + igp_info = (union igp_info *) 395 + (mode_info->atom_context->bios + data_offset); 396 + switch (frev) { 397 + case 1: 398 + switch (crev) { 399 + case 11: 400 + case 12: 401 + mem_channel_number = igp_info->v11.umachannelnumber; 402 + if (!mem_channel_number) 403 + mem_channel_number = 1; 404 + mem_type = igp_info->v11.memorytype; 405 + if (mem_type == LpDdr5MemType) 406 + mem_channel_width = 32; 407 + else 408 + mem_channel_width = 64; 409 + if (vram_width) 410 + *vram_width = mem_channel_number * mem_channel_width; 411 + if (vram_type) 412 + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 413 + break; 414 + default: 415 + return -EINVAL; 416 + } 417 + break; 418 + case 2: 419 + switch (crev) { 420 + case 1: 421 + case 2: 422 + mem_channel_number = igp_info->v21.umachannelnumber; 423 + if (!mem_channel_number) 424 + mem_channel_number = 1; 425 + mem_type = igp_info->v21.memorytype; 426 + if (mem_type == LpDdr5MemType) 427 + mem_channel_width = 32; 428 + else 429 + mem_channel_width = 64; 430 + if (vram_width) 431 + *vram_width = mem_channel_number * mem_channel_width; 432 + if (vram_type) 433 + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 434 + break; 435 + case 3: 436 + 
mem_channel_number = igp_info->v23.umachannelnumber; 437 + if (!mem_channel_number) 438 + mem_channel_number = 1; 439 + mem_type = igp_info->v23.memorytype; 440 + if (mem_type == LpDdr5MemType) 441 + mem_channel_width = 32; 442 + else 443 + mem_channel_width = 64; 444 + if (vram_width) 445 + *vram_width = mem_channel_number * mem_channel_width; 446 + if (vram_type) 447 + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 448 + break; 449 + default: 450 + return -EINVAL; 451 + } 452 + break; 453 + default: 454 + return -EINVAL; 455 + } 456 + } else { 457 + return -EINVAL; 458 + } 459 + return 0; 460 + } 461 + 462 + int amdgpu_atomfirmware_get_umc_info(struct amdgpu_device *adev, 463 + int *vram_width, int *vram_type, 464 + int *vram_vendor) 465 + { 466 + struct amdgpu_mode_info *mode_info = &adev->mode_info; 467 + int index; 468 + u16 data_offset, size; 469 + union umc_info *umc_info; 470 + u8 frev, crev; 471 + u8 mem_type; 472 + u8 mem_vendor; 473 + u32 mem_channel_number; 474 + u32 mem_channel_width; 475 + 476 + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, umc_info); 477 + 478 + if (amdgpu_atom_parse_data_header(mode_info->atom_context, 479 + index, &size, 480 + &frev, &crev, &data_offset)) { 481 + umc_info = (union umc_info *)(mode_info->atom_context->bios + data_offset); 482 + 483 + if (frev == 4) { 484 + switch (crev) { 485 + case 0: 486 + mem_channel_number = le32_to_cpu(umc_info->v40.channel_num); 487 + mem_type = le32_to_cpu(umc_info->v40.vram_type); 488 + mem_channel_width = le32_to_cpu(umc_info->v40.channel_width); 489 + mem_vendor = RREG32(adev->bios_scratch_reg_offset + 4) & 0xF; 490 + if (vram_vendor) 491 + *vram_vendor = mem_vendor; 492 + if (vram_type) 493 + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 494 + if (vram_width) 495 + *vram_width = mem_channel_number * (1 << mem_channel_width); 496 + break; 497 + default: 498 + return -EINVAL; 499 + } 500 + } else { 501 + return -EINVAL; 502 + 
} 503 + } else { 504 + return -EINVAL; 505 + } 506 + 507 + return 0; 508 + } 509 + 510 + int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev, 378 511 int *vram_width, int *vram_type, 379 512 int *vram_vendor) 380 513 { 381 514 struct amdgpu_mode_info *mode_info = &adev->mode_info; 382 515 int index, i = 0; 383 516 u16 data_offset, size; 384 - union igp_info *igp_info; 385 517 union vram_info *vram_info; 386 - union umc_info *umc_info; 387 518 union vram_module *vram_module; 388 519 u8 frev, crev; 389 520 u8 mem_type; ··· 523 392 u32 mem_channel_width; 524 393 u32 module_id; 525 394 526 - if (adev->flags & AMD_IS_APU) 527 - index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 528 - integratedsysteminfo); 529 - else { 530 - switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 531 - case IP_VERSION(12, 0, 0): 532 - case IP_VERSION(12, 0, 1): 533 - index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, umc_info); 534 - break; 535 - default: 536 - index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, vram_info); 537 - } 538 - } 395 + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, vram_info); 396 + 539 397 if (amdgpu_atom_parse_data_header(mode_info->atom_context, 540 398 index, &size, 541 399 &frev, &crev, &data_offset)) { 542 - if (adev->flags & AMD_IS_APU) { 543 - igp_info = (union igp_info *) 544 - (mode_info->atom_context->bios + data_offset); 545 - switch (frev) { 546 - case 1: 547 - switch (crev) { 548 - case 11: 549 - case 12: 550 - mem_channel_number = igp_info->v11.umachannelnumber; 551 - if (!mem_channel_number) 552 - mem_channel_number = 1; 553 - mem_type = igp_info->v11.memorytype; 554 - if (mem_type == LpDdr5MemType) 555 - mem_channel_width = 32; 556 - else 557 - mem_channel_width = 64; 558 - if (vram_width) 559 - *vram_width = mem_channel_number * mem_channel_width; 560 - if (vram_type) 561 - *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 562 - 
break; 563 - default: 564 - return -EINVAL; 565 - } 400 + vram_info = (union vram_info *) 401 + (mode_info->atom_context->bios + data_offset); 402 + 403 + module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16; 404 + if (frev == 3) { 405 + switch (crev) { 406 + /* v30 */ 407 + case 0: 408 + vram_module = (union vram_module *)vram_info->v30.vram_module; 409 + mem_vendor = (vram_module->v30.dram_vendor_id) & 0xF; 410 + if (vram_vendor) 411 + *vram_vendor = mem_vendor; 412 + mem_type = vram_info->v30.memory_type; 413 + if (vram_type) 414 + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 415 + mem_channel_number = vram_info->v30.channel_num; 416 + mem_channel_width = vram_info->v30.channel_width; 417 + if (vram_width) 418 + *vram_width = mem_channel_number * 16; 566 419 break; 567 - case 2: 568 - switch (crev) { 569 - case 1: 570 - case 2: 571 - mem_channel_number = igp_info->v21.umachannelnumber; 572 - if (!mem_channel_number) 573 - mem_channel_number = 1; 574 - mem_type = igp_info->v21.memorytype; 575 - if (mem_type == LpDdr5MemType) 576 - mem_channel_width = 32; 577 - else 578 - mem_channel_width = 64; 579 - if (vram_width) 580 - *vram_width = mem_channel_number * mem_channel_width; 581 - if (vram_type) 582 - *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 583 - break; 584 - case 3: 585 - mem_channel_number = igp_info->v23.umachannelnumber; 586 - if (!mem_channel_number) 587 - mem_channel_number = 1; 588 - mem_type = igp_info->v23.memorytype; 589 - if (mem_type == LpDdr5MemType) 590 - mem_channel_width = 32; 591 - else 592 - mem_channel_width = 64; 593 - if (vram_width) 594 - *vram_width = mem_channel_number * mem_channel_width; 595 - if (vram_type) 596 - *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 597 - break; 598 - default: 599 - return -EINVAL; 420 + default: 421 + return -EINVAL; 422 + } 423 + } else if (frev == 2) { 424 + switch (crev) { 425 + /* v23 */ 426 + case 3: 427 + if (module_id 
> vram_info->v23.vram_module_num) 428 + module_id = 0; 429 + vram_module = (union vram_module *)vram_info->v23.vram_module; 430 + while (i < module_id) { 431 + vram_module = (union vram_module *) 432 + ((u8 *)vram_module + vram_module->v9.vram_module_size); 433 + i++; 600 434 } 435 + mem_type = vram_module->v9.memory_type; 436 + if (vram_type) 437 + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 438 + mem_channel_number = vram_module->v9.channel_num; 439 + mem_channel_width = vram_module->v9.channel_width; 440 + if (vram_width) 441 + *vram_width = mem_channel_number * (1 << mem_channel_width); 442 + mem_vendor = (vram_module->v9.vender_rev_id) & 0xF; 443 + if (vram_vendor) 444 + *vram_vendor = mem_vendor; 445 + break; 446 + /* v24 */ 447 + case 4: 448 + if (module_id > vram_info->v24.vram_module_num) 449 + module_id = 0; 450 + vram_module = (union vram_module *)vram_info->v24.vram_module; 451 + while (i < module_id) { 452 + vram_module = (union vram_module *) 453 + ((u8 *)vram_module + vram_module->v10.vram_module_size); 454 + i++; 455 + } 456 + mem_type = vram_module->v10.memory_type; 457 + if (vram_type) 458 + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 459 + mem_channel_number = vram_module->v10.channel_num; 460 + mem_channel_width = vram_module->v10.channel_width; 461 + if (vram_width) 462 + *vram_width = mem_channel_number * (1 << mem_channel_width); 463 + mem_vendor = (vram_module->v10.vender_rev_id) & 0xF; 464 + if (vram_vendor) 465 + *vram_vendor = mem_vendor; 466 + break; 467 + /* v25 */ 468 + case 5: 469 + if (module_id > vram_info->v25.vram_module_num) 470 + module_id = 0; 471 + vram_module = (union vram_module *)vram_info->v25.vram_module; 472 + while (i < module_id) { 473 + vram_module = (union vram_module *) 474 + ((u8 *)vram_module + vram_module->v11.vram_module_size); 475 + i++; 476 + } 477 + mem_type = vram_module->v11.memory_type; 478 + if (vram_type) 479 + *vram_type = 
convert_atom_mem_type_to_vram_type(adev, mem_type); 480 + mem_channel_number = vram_module->v11.channel_num; 481 + mem_channel_width = vram_module->v11.channel_width; 482 + if (vram_width) 483 + *vram_width = mem_channel_number * (1 << mem_channel_width); 484 + mem_vendor = (vram_module->v11.vender_rev_id) & 0xF; 485 + if (vram_vendor) 486 + *vram_vendor = mem_vendor; 487 + break; 488 + /* v26 */ 489 + case 6: 490 + if (module_id > vram_info->v26.vram_module_num) 491 + module_id = 0; 492 + vram_module = (union vram_module *)vram_info->v26.vram_module; 493 + while (i < module_id) { 494 + vram_module = (union vram_module *) 495 + ((u8 *)vram_module + vram_module->v9.vram_module_size); 496 + i++; 497 + } 498 + mem_type = vram_module->v9.memory_type; 499 + if (vram_type) 500 + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 501 + mem_channel_number = vram_module->v9.channel_num; 502 + mem_channel_width = vram_module->v9.channel_width; 503 + if (vram_width) 504 + *vram_width = mem_channel_number * (1 << mem_channel_width); 505 + mem_vendor = (vram_module->v9.vender_rev_id) & 0xF; 506 + if (vram_vendor) 507 + *vram_vendor = mem_vendor; 601 508 break; 602 509 default: 603 510 return -EINVAL; 604 511 } 605 512 } else { 606 - switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 607 - case IP_VERSION(12, 0, 0): 608 - case IP_VERSION(12, 0, 1): 609 - umc_info = (union umc_info *)(mode_info->atom_context->bios + data_offset); 610 - 611 - if (frev == 4) { 612 - switch (crev) { 613 - case 0: 614 - mem_channel_number = le32_to_cpu(umc_info->v40.channel_num); 615 - mem_type = le32_to_cpu(umc_info->v40.vram_type); 616 - mem_channel_width = le32_to_cpu(umc_info->v40.channel_width); 617 - mem_vendor = RREG32(adev->bios_scratch_reg_offset + 4) & 0xF; 618 - if (vram_vendor) 619 - *vram_vendor = mem_vendor; 620 - if (vram_type) 621 - *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 622 - if (vram_width) 623 - *vram_width = mem_channel_number * (1 << 
mem_channel_width); 624 - break; 625 - default: 626 - return -EINVAL; 627 - } 628 - } else 629 - return -EINVAL; 630 - break; 631 - default: 632 - vram_info = (union vram_info *) 633 - (mode_info->atom_context->bios + data_offset); 634 - 635 - module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16; 636 - if (frev == 3) { 637 - switch (crev) { 638 - /* v30 */ 639 - case 0: 640 - vram_module = (union vram_module *)vram_info->v30.vram_module; 641 - mem_vendor = (vram_module->v30.dram_vendor_id) & 0xF; 642 - if (vram_vendor) 643 - *vram_vendor = mem_vendor; 644 - mem_type = vram_info->v30.memory_type; 645 - if (vram_type) 646 - *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 647 - mem_channel_number = vram_info->v30.channel_num; 648 - mem_channel_width = vram_info->v30.channel_width; 649 - if (vram_width) 650 - *vram_width = mem_channel_number * 16; 651 - break; 652 - default: 653 - return -EINVAL; 654 - } 655 - } else if (frev == 2) { 656 - switch (crev) { 657 - /* v23 */ 658 - case 3: 659 - if (module_id > vram_info->v23.vram_module_num) 660 - module_id = 0; 661 - vram_module = (union vram_module *)vram_info->v23.vram_module; 662 - while (i < module_id) { 663 - vram_module = (union vram_module *) 664 - ((u8 *)vram_module + vram_module->v9.vram_module_size); 665 - i++; 666 - } 667 - mem_type = vram_module->v9.memory_type; 668 - if (vram_type) 669 - *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 670 - mem_channel_number = vram_module->v9.channel_num; 671 - mem_channel_width = vram_module->v9.channel_width; 672 - if (vram_width) 673 - *vram_width = mem_channel_number * (1 << mem_channel_width); 674 - mem_vendor = (vram_module->v9.vender_rev_id) & 0xF; 675 - if (vram_vendor) 676 - *vram_vendor = mem_vendor; 677 - break; 678 - /* v24 */ 679 - case 4: 680 - if (module_id > vram_info->v24.vram_module_num) 681 - module_id = 0; 682 - vram_module = (union vram_module *)vram_info->v24.vram_module; 683 - while (i < 
module_id) { 684 - vram_module = (union vram_module *) 685 - ((u8 *)vram_module + vram_module->v10.vram_module_size); 686 - i++; 687 - } 688 - mem_type = vram_module->v10.memory_type; 689 - if (vram_type) 690 - *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 691 - mem_channel_number = vram_module->v10.channel_num; 692 - mem_channel_width = vram_module->v10.channel_width; 693 - if (vram_width) 694 - *vram_width = mem_channel_number * (1 << mem_channel_width); 695 - mem_vendor = (vram_module->v10.vender_rev_id) & 0xF; 696 - if (vram_vendor) 697 - *vram_vendor = mem_vendor; 698 - break; 699 - /* v25 */ 700 - case 5: 701 - if (module_id > vram_info->v25.vram_module_num) 702 - module_id = 0; 703 - vram_module = (union vram_module *)vram_info->v25.vram_module; 704 - while (i < module_id) { 705 - vram_module = (union vram_module *) 706 - ((u8 *)vram_module + vram_module->v11.vram_module_size); 707 - i++; 708 - } 709 - mem_type = vram_module->v11.memory_type; 710 - if (vram_type) 711 - *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 712 - mem_channel_number = vram_module->v11.channel_num; 713 - mem_channel_width = vram_module->v11.channel_width; 714 - if (vram_width) 715 - *vram_width = mem_channel_number * (1 << mem_channel_width); 716 - mem_vendor = (vram_module->v11.vender_rev_id) & 0xF; 717 - if (vram_vendor) 718 - *vram_vendor = mem_vendor; 719 - break; 720 - /* v26 */ 721 - case 6: 722 - if (module_id > vram_info->v26.vram_module_num) 723 - module_id = 0; 724 - vram_module = (union vram_module *)vram_info->v26.vram_module; 725 - while (i < module_id) { 726 - vram_module = (union vram_module *) 727 - ((u8 *)vram_module + vram_module->v9.vram_module_size); 728 - i++; 729 - } 730 - mem_type = vram_module->v9.memory_type; 731 - if (vram_type) 732 - *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 733 - mem_channel_number = vram_module->v9.channel_num; 734 - mem_channel_width = vram_module->v9.channel_width; 735 - if 
(vram_width) 736 - *vram_width = mem_channel_number * (1 << mem_channel_width); 737 - mem_vendor = (vram_module->v9.vender_rev_id) & 0xF; 738 - if (vram_vendor) 739 - *vram_vendor = mem_vendor; 740 - break; 741 - default: 742 - return -EINVAL; 743 - } 744 - } else { 745 - /* invalid frev */ 746 - return -EINVAL; 747 - } 748 - } 513 + /* invalid frev */ 514 + return -EINVAL; 749 515 } 516 + 517 + } else { 518 + return -EINVAL; 750 519 } 751 520 752 521 return 0;
+4
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
··· 30 30 bool amdgpu_atomfirmware_gpu_virtualization_supported(struct amdgpu_device *adev); 31 31 void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev); 32 32 int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev); 33 + int amdgpu_atomfirmware_get_integrated_system_info(struct amdgpu_device *adev, 34 + int *vram_width, int *vram_type, int *vram_vendor); 35 + int amdgpu_atomfirmware_get_umc_info(struct amdgpu_device *adev, 36 + int *vram_width, int *vram_type, int *vram_vendor); 33 37 int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev, 34 38 int *vram_width, int *vram_type, int *vram_vendor); 35 39 int amdgpu_atomfirmware_get_uma_carveout_info(struct amdgpu_device *adev,
+29
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
··· 34 34 #include "amdgpu_ras.h" 35 35 #include "amdgpu_reset.h" 36 36 #include "amdgpu_xgmi.h" 37 + #include "amdgpu_atomfirmware.h" 37 38 38 39 #include <drm/drm_drv.h> 39 40 #include <drm/ttm/ttm_tt.h> ··· 1746 1745 "Mem ranges not matching with hardware config\n"); 1747 1746 } 1748 1747 1748 + return 0; 1749 + } 1750 + 1751 + int amdgpu_gmc_get_vram_info(struct amdgpu_device *adev, 1752 + int *vram_width, int *vram_type, int *vram_vendor) 1753 + { 1754 + int ret = 0; 1755 + 1756 + if (adev->flags & AMD_IS_APU) 1757 + return amdgpu_atomfirmware_get_integrated_system_info(adev, 1758 + vram_width, vram_type, vram_vendor); 1759 + switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1760 + case IP_VERSION(12, 0, 0): 1761 + case IP_VERSION(12, 0, 1): 1762 + return amdgpu_atomfirmware_get_umc_info(adev, 1763 + vram_width, vram_type, vram_vendor); 1764 + case IP_VERSION(9, 5, 0): 1765 + case IP_VERSION(9, 4, 4): 1766 + case IP_VERSION(9, 4, 3): 1767 + ret = amdgpu_atomfirmware_get_umc_info(adev, 1768 + vram_width, vram_type, vram_vendor); 1769 + if (vram_width && !ret) 1770 + *vram_width *= hweight32(adev->aid_mask); 1771 + return ret; 1772 + default: 1773 + return amdgpu_atomfirmware_get_vram_info(adev, 1774 + vram_width, vram_type, vram_vendor); 1775 + } 1749 1776 return 0; 1750 1777 }
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
··· 482 482 int amdgpu_gmc_init_mem_ranges(struct amdgpu_device *adev); 483 483 void amdgpu_gmc_init_sw_mem_ranges(struct amdgpu_device *adev, 484 484 struct amdgpu_mem_partition_info *mem_ranges); 485 + int amdgpu_gmc_get_vram_info(struct amdgpu_device *adev, 486 + int *vram_width, int *vram_type, int *vram_vendor); 485 487 #endif
+1 -1
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
··· 767 767 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_GDDR6; 768 768 adev->gmc.vram_width = 1 * 128; /* numchan * chansize */ 769 769 } else { 770 - r = amdgpu_atomfirmware_get_vram_info(adev, 770 + r = amdgpu_gmc_get_vram_info(adev, 771 771 &vram_width, &vram_type, &vram_vendor); 772 772 adev->gmc.vram_width = vram_width; 773 773
+1 -1
drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
··· 751 751 752 752 spin_lock_init(&adev->gmc.invalidate_lock); 753 753 754 - r = amdgpu_atomfirmware_get_vram_info(adev, 754 + r = amdgpu_gmc_get_vram_info(adev, 755 755 &vram_width, &vram_type, &vram_vendor); 756 756 adev->gmc.vram_width = vram_width; 757 757
+1 -1
drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
··· 825 825 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 1, 0)) { 826 826 gmc_v12_1_init_vram_info(adev); 827 827 } else { 828 - r = amdgpu_atomfirmware_get_vram_info(adev, 828 + r = amdgpu_gmc_get_vram_info(adev, 829 829 &vram_width, &vram_type, &vram_vendor); 830 830 adev->gmc.vram_width = vram_width; 831 831 adev->gmc.vram_type = vram_type;
+29 -23
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
··· 1823 1823 adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0); 1824 1824 } 1825 1825 1826 - static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev) 1826 + static void gmc_v9_0_init_vram_info(struct amdgpu_device *adev) 1827 1827 { 1828 1828 static const u32 regBIF_BIOS_SCRATCH_4 = 0x50; 1829 + int dev_var = adev->pdev->device & 0xF; 1829 1830 u32 vram_info; 1830 1831 1831 - adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM; 1832 - adev->gmc.vram_width = 128 * 64; 1832 + if (adev->gmc.is_app_apu) { 1833 + adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM; 1834 + adev->gmc.vram_width = 128 * 64; 1835 + } else if (adev->flags & AMD_IS_APU) { 1836 + adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4; 1837 + adev->gmc.vram_width = 64 * 64; 1838 + } else if (amdgpu_is_multi_aid(adev)) { 1839 + adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM; 1840 + adev->gmc.vram_width = 128 * 64; 1833 1841 1834 - if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) 1835 - adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E; 1842 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) 1843 + adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E; 1836 1844 1837 - if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) && 1838 - adev->rev_id == 0x3) 1839 - adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E; 1845 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) && 1846 + adev->rev_id == 0x3) 1847 + adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E; 1840 1848 1841 - if (!(adev->flags & AMD_IS_APU) && !amdgpu_sriov_vf(adev)) { 1842 - vram_info = RREG32(regBIF_BIOS_SCRATCH_4); 1843 - adev->gmc.vram_vendor = vram_info & 0xF; 1849 + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) && 1850 + (dev_var == 0x5)) 1851 + adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E; 1852 + 1853 + if (!(adev->flags & AMD_IS_APU) && !amdgpu_sriov_vf(adev)) { 1854 + vram_info = RREG32(regBIF_BIOS_SCRATCH_4); 1855 + adev->gmc.vram_vendor = vram_info & 0xF; 1856 + } 1844 1857 } 
1845 1858 } 1846 1859 ··· 1869 1856 1870 1857 spin_lock_init(&adev->gmc.invalidate_lock); 1871 1858 1872 - if (amdgpu_is_multi_aid(adev)) { 1873 - gmc_v9_4_3_init_vram_info(adev); 1874 - } else if (!adev->bios) { 1875 - if (adev->flags & AMD_IS_APU) { 1876 - adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4; 1877 - adev->gmc.vram_width = 64 * 64; 1878 - } else { 1879 - adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM; 1880 - adev->gmc.vram_width = 128 * 64; 1881 - } 1859 + if (!adev->bios) { 1860 + gmc_v9_0_init_vram_info(adev); 1882 1861 } else { 1883 - r = amdgpu_atomfirmware_get_vram_info(adev, 1884 - &vram_width, &vram_type, &vram_vendor); 1862 + r = amdgpu_gmc_get_vram_info(adev, 1863 + &vram_width, &vram_type, &vram_vendor); 1885 1864 if (amdgpu_sriov_vf(adev)) 1886 1865 /* For Vega10 SR-IOV, vram_width can't be read from ATOM as RAVEN, 1887 1866 * and DF related registers is not readable, seems hardcord is the ··· 1901 1896 adev->gmc.vram_type = vram_type; 1902 1897 adev->gmc.vram_vendor = vram_vendor; 1903 1898 } 1899 + 1904 1900 switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { 1905 1901 case IP_VERSION(9, 1, 0): 1906 1902 case IP_VERSION(9, 2, 2):