Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull more arm64 updates from Catalin Marinas:
"The main 'feature' is a workaround for C1-Pro erratum 4193714
requiring IPIs during TLB maintenance if a process is running in user
space with SME enabled.

The hardware acknowledges the DVMSync messages before completing
in-flight SME accesses, with security implications. The workaround
makes use of the mm_cpumask() to track the cores that need
interrupting (arm64 hasn't used this mask before).

The rest are fixes for MPAM, CCA and a generated header that turned up
during the merge window or shortly before.

Summary:

Core features:

- Add workaround for C1-Pro erratum 4193714 - early CME (SME unit)
DVMSync acknowledgement. The fix consists of sending IPIs on TLB
maintenance to those CPUs running in user space with SME enabled

- Include kernel-hwcap.h in list of generated files (missed in a
recent commit generating the KERNEL_HWCAP_* macros)

CCA:

- Fix RSI_INCOMPLETE error check in arm-cca-guest

MPAM:

- Fix an unmount->remount problem with the CDP emulation, an
uninitialised variable and checker warnings"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
arm_mpam: resctrl: Make resctrl_mon_ctx_waiters static
arm_mpam: resctrl: Fix the check for no monitor components found
arm_mpam: resctrl: Fix MBA CDP alloc_capable handling on unmount
virt: arm-cca-guest: fix error check for RSI_INCOMPLETE
arm64/hwcap: Include kernel-hwcap.h in list of generated files
arm64: errata: Work around early CME DVMSync acknowledgement
arm64: cputype: Add C1-Pro definitions
arm64: tlb: Pass the corresponding mm to __tlbi_sync_s1ish()
arm64: tlb: Introduce __tlbi_sync_s1ish_{kernel,batch}() for TLB maintenance
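
The ordering handshake at the heart of the workaround — a CPU returning to user space with SME publishes itself in mm_cpumask() before any SME access, while a CPU doing TLB maintenance reads that mask after its TLBI+DSB and IPIs anyone it finds — can be modelled in plain C11. A minimal, runnable sketch, not the kernel code: threads stand in for CPUs, a seq_cst fence stands in for the TLBI+DSB, and the names are invented.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool in_sme_user;	/* models this CPU's mm_cpumask() bit */

static void *sme_cpu(void *arg)
{
	(void)arg;
	/* sme_set_active(): publish the bit in program order before any
	 * SME access that could use a stale translation. */
	atomic_store(&in_sme_user, true);
	/* ... user-space SME accesses would happen here ... */
	return NULL;
}

static void *flusher_cpu(void *arg)
{
	(void)arg;
	/* "TLBI; DSB": the fence orders the mask read after the flush. */
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load(&in_sme_user))
		puts("flusher: bit visible, IPI needed (sme_do_dvmsync())");
	else
		puts("flusher: mask clear, the DVMSync alone was enough");
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, sme_cpu, NULL);
	pthread_create(&b, NULL, flusher_cpu, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Either outcome is safe: if the flusher misses the bit, the publishing store (and every SME access after it) is ordered after the invalidation; if it sees the bit, the IPI makes the remote CPU take an exception, which on C1-Pro with SCTLR_EL1.IESB set completes the in-flight SME accesses.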

+297 -13
+2
Documentation/arch/arm64/silicon-errata.rst
···
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-V3AE   | #3312417        | ARM64_ERRATUM_3194386       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | C1-Pro          | #4193714        | ARM64_ERRATUM_4193714       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | MMU-500         | #841119,826419  | ARM_SMMU_MMU_500_CPRE_ERRATA|
 |                |                 | #562869,1047329 |                             |
 +----------------+-----------------+-----------------+-----------------------------+
+12
arch/arm64/Kconfig
···
 
 	  If unsure, say Y.
 
+config ARM64_ERRATUM_4193714
+	bool "C1-Pro: 4193714: SME DVMSync early acknowledgement"
+	depends on ARM64_SME
+	default y
+	help
+	  Enable workaround for C1-Pro acknowledging the DVMSync before
+	  the SME memory accesses are complete. This will cause TLB
+	  maintenance for processes using SME to also issue an IPI to
+	  the affected CPUs.
+
+	  If unsure, say Y.
+
 config CAVIUM_ERRATUM_22375
 	bool "Cavium erratum 22375, 24313"
 	default y
+1
arch/arm64/include/asm/Kbuild
···
 generic-y += user.h
 
 generated-y += cpucap-defs.h
+generated-y += kernel-hwcap.h
 generated-y += sysreg-defs.h
+2
arch/arm64/include/asm/cpucaps.h
···
 		return IS_ENABLED(CONFIG_ARM64_WORKAROUND_REPEAT_TLBI);
 	case ARM64_WORKAROUND_SPECULATIVE_SSBS:
 		return IS_ENABLED(CONFIG_ARM64_ERRATUM_3194386);
+	case ARM64_WORKAROUND_4193714:
+		return IS_ENABLED(CONFIG_ARM64_ERRATUM_4193714);
 	case ARM64_MPAM:
 		/*
 		 * KVM MPAM support doesn't rely on the host kernel supporting MPAM.
+2
arch/arm64/include/asm/cputype.h
···
 #define ARM_CPU_PART_CORTEX_A725 0xD87
 #define ARM_CPU_PART_CORTEX_A720AE 0xD89
 #define ARM_CPU_PART_NEOVERSE_N3 0xD8E
+#define ARM_CPU_PART_C1_PRO 0xD8B
 
 #define APM_CPU_PART_XGENE 0x000
 #define APM_CPU_VAR_POTENZA 0x00
···
 #define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725)
 #define MIDR_CORTEX_A720AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A720AE)
 #define MIDR_NEOVERSE_N3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N3)
+#define MIDR_C1_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_C1_PRO)
 #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
 #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
+21
arch/arm64/include/asm/fpsimd.h
···
 	return __sme_state_size(task_get_sme_vl(task));
 }
 
+void sme_enable_dvmsync(void);
+void sme_set_active(void);
+void sme_clear_active(void);
+
+static inline void sme_enter_from_user_mode(void)
+{
+	if (alternative_has_cap_unlikely(ARM64_WORKAROUND_4193714) &&
+	    test_thread_flag(TIF_SME))
+		sme_clear_active();
+}
+
+static inline void sme_exit_to_user_mode(void)
+{
+	if (alternative_has_cap_unlikely(ARM64_WORKAROUND_4193714) &&
+	    test_thread_flag(TIF_SME))
+		sme_set_active();
+}
+
 #else
 
 static inline void sme_user_disable(void) { BUILD_BUG(); }
···
 {
 	return 0;
 }
+
+static inline void sme_enter_from_user_mode(void) { }
+static inline void sme_exit_to_user_mode(void) { }
 
 #endif /* ! CONFIG_ARM64_SME */
+8 -2
arch/arm64/include/asm/tlbbatch.h
···
 #ifndef _ARCH_ARM64_TLBBATCH_H
 #define _ARCH_ARM64_TLBBATCH_H
 
+#include <linux/cpumask.h>
+
 struct arch_tlbflush_unmap_batch {
+#ifdef CONFIG_ARM64_ERRATUM_4193714
 	/*
-	 * For arm64, HW can do tlb shootdown, so we don't
-	 * need to record cpumask for sending IPI
+	 * Track CPUs that need SME DVMSync on completion of this batch.
+	 * Otherwise, the arm64 HW can do tlb shootdown, so we don't need to
+	 * record cpumask for sending IPI
 	 */
+	cpumask_var_t cpumask;
+#endif
 };
 
 #endif /* _ARCH_ARM64_TLBBATCH_H */
+87 -7
arch/arm64/include/asm/tlbflush.h
···
 	}
 }
 
+#ifdef CONFIG_ARM64_ERRATUM_4193714
+
+void sme_do_dvmsync(const struct cpumask *mask);
+
+static inline void sme_dvmsync(struct mm_struct *mm)
+{
+	if (!alternative_has_cap_unlikely(ARM64_WORKAROUND_4193714))
+		return;
+
+	sme_do_dvmsync(mm_cpumask(mm));
+}
+
+static inline void sme_dvmsync_add_pending(struct arch_tlbflush_unmap_batch *batch,
+					   struct mm_struct *mm)
+{
+	if (!alternative_has_cap_unlikely(ARM64_WORKAROUND_4193714))
+		return;
+
+	/*
+	 * Order the mm_cpumask() read after the hardware DVMSync.
+	 */
+	dsb(ish);
+	if (cpumask_empty(mm_cpumask(mm)))
+		return;
+
+	/*
+	 * Allocate the batch cpumask on first use. Fall back to an immediate
+	 * IPI for this mm in case of failure.
+	 */
+	if (!cpumask_available(batch->cpumask) &&
+	    !zalloc_cpumask_var(&batch->cpumask, GFP_ATOMIC)) {
+		sme_do_dvmsync(mm_cpumask(mm));
+		return;
+	}
+
+	cpumask_or(batch->cpumask, batch->cpumask, mm_cpumask(mm));
+}
+
+static inline void sme_dvmsync_batch(struct arch_tlbflush_unmap_batch *batch)
+{
+	if (!alternative_has_cap_unlikely(ARM64_WORKAROUND_4193714))
+		return;
+
+	if (!cpumask_available(batch->cpumask))
+		return;
+
+	sme_do_dvmsync(batch->cpumask);
+	cpumask_clear(batch->cpumask);
+}
+
+#else
+
+static inline void sme_dvmsync(struct mm_struct *mm)
+{
+}
+static inline void sme_dvmsync_add_pending(struct arch_tlbflush_unmap_batch *batch,
+					   struct mm_struct *mm)
+{
+}
+static inline void sme_dvmsync_batch(struct arch_tlbflush_unmap_batch *batch)
+{
+}
+
+#endif /* CONFIG_ARM64_ERRATUM_4193714 */
+
 /*
  * Level-based TLBI operations.
  *
···
  * Complete broadcast TLB maintenance issued by the host which invalidates
  * stage 1 information in the host's own translation regime.
  */
-static inline void __tlbi_sync_s1ish(void)
+static inline void __tlbi_sync_s1ish(struct mm_struct *mm)
+{
+	dsb(ish);
+	__repeat_tlbi_sync(vale1is, 0);
+	sme_dvmsync(mm);
+}
+
+static inline void __tlbi_sync_s1ish_batch(struct arch_tlbflush_unmap_batch *batch)
+{
+	dsb(ish);
+	__repeat_tlbi_sync(vale1is, 0);
+	sme_dvmsync_batch(batch);
+}
+
+static inline void __tlbi_sync_s1ish_kernel(void)
 {
 	dsb(ish);
 	__repeat_tlbi_sync(vale1is, 0);
···
 {
 	dsb(ishst);
 	__tlbi(vmalle1is);
-	__tlbi_sync_s1ish();
+	__tlbi_sync_s1ish_kernel();
 	isb();
 }
 
···
 	asid = __TLBI_VADDR(0, ASID(mm));
 	__tlbi(aside1is, asid);
 	__tlbi_user(aside1is, asid);
-	__tlbi_sync_s1ish();
+	__tlbi_sync_s1ish(mm);
 	mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
 }
···
  */
 static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 {
-	__tlbi_sync_s1ish();
+	__tlbi_sync_s1ish_batch(batch);
 }
 
 /*
···
 
 	if (!(flags & TLBF_NOSYNC)) {
 		if (!(flags & TLBF_NOBROADCAST))
-			__tlbi_sync_s1ish();
+			__tlbi_sync_s1ish(mm);
 		else
 			dsb(nsh);
 	}
···
 	dsb(ishst);
 	__flush_s1_tlb_range_op(vaale1is, start, pages, stride, 0,
 				TLBI_TTL_UNKNOWN);
-	__tlbi_sync_s1ish();
+	__tlbi_sync_s1ish_kernel();
 	isb();
 }
···
 
 	dsb(ishst);
 	__tlbi(vaae1is, addr);
-	__tlbi_sync_s1ish();
+	__tlbi_sync_s1ish_kernel();
 	isb();
 }
···
 
 	__flush_tlb_range(&vma, start, end, PAGE_SIZE, 3,
 			  TLBF_NOWALKCACHE | TLBF_NOSYNC);
+	sme_dvmsync_add_pending(batch, mm);
 }
 
 static inline bool __pte_flags_need_flush(ptdesc_t oldval, ptdesc_t newval)
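
One detail worth noting in sme_dvmsync_add_pending() above: the batch cpumask is allocated lazily with GFP_ATOMIC, and an allocation failure degrades to an immediate IPI for that mm rather than an error. The same "defer when possible, fall back to the eager path otherwise" shape in a self-contained userspace toy — every name here is invented for illustration:

#include <stdio.h>
#include <stdlib.h>

struct batch {
	unsigned long *mask;	/* lazily allocated, like cpumask_var_t */
};

static void ipi_now(unsigned long cpus)
{
	printf("eager IPI to cpus %#lx\n", cpus);
}

static void add_pending(struct batch *b, unsigned long cpus)
{
	/* Allocate on first use; degrade to the immediate path on failure. */
	if (!b->mask && !(b->mask = calloc(1, sizeof(*b->mask)))) {
		ipi_now(cpus);
		return;
	}
	*b->mask |= cpus;	/* defer: one IPI at flush time */
}

static void flush(struct batch *b)
{
	if (!b->mask || !*b->mask)
		return;
	printf("batched IPI to cpus %#lx\n", *b->mask);
	*b->mask = 0;
}

int main(void)
{
	struct batch b = { 0 };

	add_pending(&b, 0x3);	/* cpus 0 and 1 */
	add_pending(&b, 0x4);	/* cpu 2 */
	flush(&b);		/* one IPI covering cpus 0-2 */
	free(b.mask);
	return 0;
}

A failed allocation only costs one eager IPI; later calls for the same batch still try to allocate and coalesce.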
+30
arch/arm64/kernel/cpu_errata.c
···
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/cpufeature.h>
+#include <asm/fpsimd.h>
 #include <asm/kvm_asm.h>
 #include <asm/smp_plat.h>
···
 };
 #endif
 
+#ifdef CONFIG_ARM64_ERRATUM_4193714
+static bool has_sme_dvmsync_erratum(const struct arm64_cpu_capabilities *entry,
+				    int scope)
+{
+	if (!id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1)))
+		return false;
+
+	return is_affected_midr_range(entry, scope);
+}
+
+static void cpu_enable_sme_dvmsync(const struct arm64_cpu_capabilities *__unused)
+{
+	if (this_cpu_has_cap(ARM64_WORKAROUND_4193714))
+		sme_enable_dvmsync();
+}
+#endif
+
 #ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
 static const struct midr_range erratum_ac03_cpu_38_list[] = {
 	MIDR_ALL_VERSIONS(MIDR_AMPERE1),
···
 		.capability = ARM64_WORKAROUND_4311569,
 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 		.matches = need_arm_si_l1_workaround_4311569,
+	},
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_4193714
+	{
+		.desc = "C1-Pro SME DVMSync early acknowledgement",
+		.capability = ARM64_WORKAROUND_4193714,
+		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+		.matches = has_sme_dvmsync_erratum,
+		.cpu_enable = cpu_enable_sme_dvmsync,
+		/* C1-Pro r0p0 - r1p2 (the latter only when REVIDR_EL1[0]==0) */
+		.midr_range = MIDR_RANGE(MIDR_C1_PRO, 0, 0, 1, 2),
+		MIDR_FIXED(MIDR_CPU_VAR_REV(1, 2), BIT(0)),
 	},
 #endif
 #ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
+3
arch/arm64/kernel/entry-common.c
···
 #include <asm/daifflags.h>
 #include <asm/esr.h>
 #include <asm/exception.h>
+#include <asm/fpsimd.h>
 #include <asm/irq_regs.h>
 #include <asm/kprobes.h>
 #include <asm/mmu.h>
···
 {
 	enter_from_user_mode(regs);
 	mte_disable_tco_entry(current);
+	sme_enter_from_user_mode();
 }
 
 /*
···
 	local_irq_disable();
 	exit_to_user_mode_prepare_legacy(regs);
 	local_daif_mask();
+	sme_exit_to_user_mode();
 	mte_check_tfsr_exit();
 	exit_to_user_mode();
 }
+79
arch/arm64/kernel/fpsimd.c
···
 #include <linux/compiler.h>
 #include <linux/cpu.h>
 #include <linux/cpu_pm.h>
+#include <linux/cpumask.h>
 #include <linux/ctype.h>
 #include <linux/kernel.h>
 #include <linux/linkage.h>
···
 #include <linux/sched/task_stack.h>
 #include <linux/signal.h>
 #include <linux/slab.h>
+#include <linux/smp.h>
 #include <linux/stddef.h>
 #include <linux/sysctl.h>
 #include <linux/swab.h>
···
 
 	put_cpu_fpsimd_context();
 }
+
+#ifdef CONFIG_ARM64_ERRATUM_4193714
+
+/*
+ * SME/CME erratum handling.
+ */
+static cpumask_t sme_dvmsync_cpus;
+
+/*
+ * These helpers are only called from non-preemptible contexts, so
+ * smp_processor_id() is safe here.
+ */
+void sme_set_active(void)
+{
+	unsigned int cpu = smp_processor_id();
+
+	if (!cpumask_test_cpu(cpu, &sme_dvmsync_cpus))
+		return;
+
+	cpumask_set_cpu(cpu, mm_cpumask(current->mm));
+
+	/*
+	 * A subsequent (post ERET) SME access may use a stale address
+	 * translation. On C1-Pro, a TLBI+DSB on a different CPU will wait for
+	 * the completion of cpumask_set_cpu() above as it appears in program
+	 * order before the SME access. The post-TLBI+DSB read of mm_cpumask()
+	 * will lead to the IPI being issued.
+	 *
+	 * https://lore.kernel.org/r/ablEXwhfKyJW1i7l@J2N7QTR9R3
+	 */
+}
+
+void sme_clear_active(void)
+{
+	unsigned int cpu = smp_processor_id();
+
+	if (!cpumask_test_cpu(cpu, &sme_dvmsync_cpus))
+		return;
+
+	/*
+	 * With SCTLR_EL1.IESB enabled, the SME memory transactions are
+	 * completed on entering EL1.
+	 */
+	cpumask_clear_cpu(cpu, mm_cpumask(current->mm));
+}
+
+static void sme_dvmsync_ipi(void *unused)
+{
+	/*
+	 * With SCTLR_EL1.IESB on, taking an exception is sufficient to ensure
+	 * the completion of the SME memory accesses, so no need for an
+	 * explicit DSB.
+	 */
+}
+
+void sme_do_dvmsync(const struct cpumask *mask)
+{
+	/*
+	 * This is called from the TLB maintenance functions after the DSB ISH
+	 * to send the hardware DVMSync message. If this CPU sees the mask as
+	 * empty, the remote CPU executing sme_set_active() would have seen
+	 * the DVMSync and no IPI required.
+	 */
+	if (cpumask_empty(mask))
+		return;
+
+	preempt_disable();
+	smp_call_function_many(mask, sme_dvmsync_ipi, NULL, true);
+	preempt_enable();
+}
+
+void sme_enable_dvmsync(void)
+{
+	cpumask_set_cpu(smp_processor_id(), &sme_dvmsync_cpus);
+}
+
+#endif /* CONFIG_ARM64_ERRATUM_4193714 */
 
 /*
  * Trapped SME access
+36
arch/arm64/kernel/process.c
···
 #include <linux/reboot.h>
 #include <linux/interrupt.h>
 #include <linux/init.h>
+#include <linux/cpumask.h>
 #include <linux/cpu.h>
 #include <linux/elfcore.h>
 #include <linux/pm.h>
···
 		flush_gcs();
 }
 
+#ifdef CONFIG_ARM64_ERRATUM_4193714
+
+static void arch_dup_tlbbatch_mask(struct task_struct *dst)
+{
+	/*
+	 * Clear the inherited cpumask with memset() to cover both cases where
+	 * cpumask_var_t is a pointer or an array. It will be allocated lazily
+	 * in sme_dvmsync_add_pending() if CPUMASK_OFFSTACK=y.
+	 */
+	if (alternative_has_cap_unlikely(ARM64_WORKAROUND_4193714))
+		memset(&dst->tlb_ubc.arch.cpumask, 0,
+		       sizeof(dst->tlb_ubc.arch.cpumask));
+}
+
+static void arch_release_tlbbatch_mask(struct task_struct *tsk)
+{
+	if (alternative_has_cap_unlikely(ARM64_WORKAROUND_4193714))
+		free_cpumask_var(tsk->tlb_ubc.arch.cpumask);
+}
+
+#else
+
+static void arch_dup_tlbbatch_mask(struct task_struct *dst)
+{
+}
+
+static void arch_release_tlbbatch_mask(struct task_struct *tsk)
+{
+}
+
+#endif /* CONFIG_ARM64_ERRATUM_4193714 */
+
 void arch_release_task_struct(struct task_struct *tsk)
 {
+	arch_release_tlbbatch_mask(tsk);
 	fpsimd_release_task(tsk);
 }
 
···
 	fpsimd_sync_from_effective_state(src);
 
 	*dst = *src;
+
+	arch_dup_tlbbatch_mask(dst);
 
 	/*
 	 * Drop stale reference to src's sve_state and convert dst to
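
The memset() in arch_dup_tlbbatch_mask() above is correct for both definitions of cpumask_var_t: with CPUMASK_OFFSTACK=y it is a pointer (so zeroing drops the mask inherited from the parent, to be reallocated lazily), otherwise it is an embedded array (so zeroing clears the mask). A compilable toy of that duality — the typedef and struct are invented stand-ins:

#include <string.h>

#ifdef OFFSTACK
typedef unsigned long *mask_var_t;	/* pointer, allocated on demand */
#else
typedef unsigned long mask_var_t[1];	/* embedded fixed-size array */
#endif

struct toy_task {
	mask_var_t mask;
};

static void dup_mask(struct toy_task *dst)
{
	/* Null the inherited pointer, or clear the embedded mask:
	 * memset() of the field covers both representations. */
	memset(&dst->mask, 0, sizeof(dst->mask));
}

int main(void)
{
	struct toy_task child;

	dup_mask(&child);
	return 0;
}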
+1 -1
arch/arm64/kernel/sys_compat.c
···
 	 * We pick the reserved-ASID to minimise the impact.
 	 */
 	__tlbi(aside1is, 0UL);
-	__tlbi_sync_s1ish();
+	__tlbi_sync_s1ish(current->mm);
 }
 
 	ret = caches_clean_inval_user_pou(start, start + chunk);
+1
arch/arm64/tools/cpucaps
···
 WORKAROUND_2457168
 WORKAROUND_2645198
 WORKAROUND_2658417
+WORKAROUND_4193714
 WORKAROUND_4311569
 WORKAROUND_AMPERE_AC03_CPU_38
 WORKAROUND_AMPERE_AC04_CPU_23
+10 -2
drivers/resctrl/mpam_resctrl.c
···
 
 #include "mpam_internal.h"
 
-DECLARE_WAIT_QUEUE_HEAD(resctrl_mon_ctx_waiters);
+static DECLARE_WAIT_QUEUE_HEAD(resctrl_mon_ctx_waiters);
 
 /*
  * The classes we've picked to map to resctrl resources, wrapped
···
 	if (cdp_enabled && !mpam_resctrl_controls[RDT_RESOURCE_MBA].cdp_enabled)
 		mpam_resctrl_controls[RDT_RESOURCE_MBA].resctrl_res.alloc_capable = false;
 
+	/*
+	 * If resctrl has attempted to enable CDP on MBA, re-enable MBA as two
+	 * configurations will be provided so there is no aliasing problem.
+	 */
 	if (mpam_resctrl_controls[RDT_RESOURCE_MBA].cdp_enabled &&
 	    mpam_resctrl_controls[RDT_RESOURCE_MBA].class)
+		mpam_resctrl_controls[RDT_RESOURCE_MBA].resctrl_res.alloc_capable = true;
+
+	/* On unmount when CDP is disabled, re-enable MBA */
+	if (!cdp_enabled && mpam_resctrl_controls[RDT_RESOURCE_MBA].class)
 		mpam_resctrl_controls[RDT_RESOURCE_MBA].resctrl_res.alloc_capable = true;
 
 	if (enable) {
···
 	}
 
 	if (r->mon_capable) {
-		struct mpam_component *any_mon_comp;
+		struct mpam_component *any_mon_comp = NULL;
 		struct mpam_resctrl_mon *mon;
 		enum resctrl_event_id eventid;
 
+2 -1
drivers/virt/coco/arm-cca-guest/arm-cca-guest.c
···
 	} while (info.result == RSI_INCOMPLETE &&
 		 info.offset < RSI_GRANULE_SIZE);
 
-	if (info.result != RSI_SUCCESS) {
+	/* Break out in case of failure */
+	if (info.result != RSI_SUCCESS && info.result != RSI_INCOMPLETE) {
 		ret = -ENXIO;
 		token_size = 0;
 		goto exit_free_granule_page;
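
The pattern being fixed here: RSI_INCOMPLETE is both the loop's retry status and a legitimate state on exit — it just means the current granule filled up and the caller should continue with the next one — so only other non-success codes are real errors. A compilable sketch of the corrected shape; the status values and the chunked fill function are invented stand-ins, not the RSI ABI:

#include <errno.h>
#include <stdio.h>

enum rsi_status { RSI_SUCCESS, RSI_INCOMPLETE, RSI_ERROR_INPUT };

#define RSI_GRANULE_SIZE 4096UL

/* Invented stand-in for the token call: copies 1 KiB per call and
 * reports RSI_INCOMPLETE while more token data remains. */
static enum rsi_status get_token_chunk(unsigned long *offset, unsigned long total)
{
	*offset += 1024;
	return *offset < total ? RSI_INCOMPLETE : RSI_SUCCESS;
}

static int fill_granule(unsigned long total)
{
	enum rsi_status result;
	unsigned long offset = 0;

	do {
		result = get_token_chunk(&offset, total);
	} while (result == RSI_INCOMPLETE && offset < RSI_GRANULE_SIZE);

	/* RSI_INCOMPLETE at this point only means the granule is full;
	 * treating it as a failure was the bug. */
	if (result != RSI_SUCCESS && result != RSI_INCOMPLETE)
		return -ENXIO;

	printf("granule done: result=%d offset=%lu\n", result, offset);
	return 0;
}

int main(void)
{
	return fill_granule(2 * RSI_GRANULE_SIZE) || fill_granule(1024);
}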