···619619620620 bool621621622622-config FIT_IMAGE_FDT_EPM5623623- bool "Include FDT for Mobileye EyeQ5 development platforms"624624- depends on MACH_EYEQ5625625- default n626626- help627627- Enable this to include the FDT for the EyeQ5 development platforms628628- from Mobileye in the FIT kernel image.629629- This requires u-boot on the platform.630630-631622config MACH_NINTENDO64632623 bool "Nintendo 64 console"633624 select CEVT_R4K···10011010 Say Y here for most Octeon reference boards.1002101110031012endchoice10131013+10141014+config FIT_IMAGE_FDT_EPM510151015+ bool "Include FDT for Mobileye EyeQ5 development platforms"10161016+ depends on MACH_EYEQ510171017+ default n10181018+ help10191019+ Enable this to include the FDT for the EyeQ5 development platforms10201020+ from Mobileye in the FIT kernel image.10211021+ This requires u-boot on the platform.1004102210051023source "arch/mips/alchemy/Kconfig"10061024source "arch/mips/ath25/Kconfig"
···24392439 # with named address spaces - see GCC PR sanitizer/111736.24402440 #24412441 depends on !KASAN24422442+ # -fsanitize=thread (KCSAN) is also incompatible.24432443+ depends on !KCSAN2442244424432445config CC_HAS_SLS24442446 def_bool $(cc-option,-mharden-sls=all)
-2
arch/x86/Makefile
···251251252252libs-y += arch/x86/lib/253253254254-core-y += arch/x86/virt/255255-256254# drivers-y are linked after core-y257255drivers-$(CONFIG_MATH_EMULATION) += arch/x86/math-emu/258256drivers-$(CONFIG_PCI) += arch/x86/pci/
+15-5
arch/x86/boot/compressed/efi_mixed.S
···1515 */16161717#include <linux/linkage.h>1818+#include <asm/asm-offsets.h>1819#include <asm/msr.h>1920#include <asm/page_types.h>2021#include <asm/processor-flags.h>2122#include <asm/segment.h>2323+#include <asm/setup.h>22242325 .code642426 .text···151149SYM_FUNC_START(efi32_stub_entry)152150 call 1f1531511: popl %ecx152152+ leal (efi32_boot_args - 1b)(%ecx), %ebx154153155154 /* Clear BSS */156155 xorl %eax, %eax···166163 popl %ecx167164 popl %edx168165 popl %esi166166+ movl %esi, 8(%ebx)169167 jmp efi32_entry170168SYM_FUNC_END(efi32_stub_entry)171169#endif···243239 *244240 * Arguments: %ecx image handle245241 * %edx EFI system table pointer246246- * %esi struct bootparams pointer (or NULL when not using247247- * the EFI handover protocol)248242 *249243 * Since this is the point of no return for ordinary execution, no registers250244 * are considered live except for the function parameters. [Note that the EFI···268266 leal (efi32_boot_args - 1b)(%ebx), %ebx269267 movl %ecx, 0(%ebx)270268 movl %edx, 4(%ebx)271271- movl %esi, 8(%ebx)272269 movb $0x0, 12(%ebx) // efi_is64270270+271271+ /*272272+ * Allocate some memory for a temporary struct boot_params, which only273273+ * needs the minimal pieces that startup_32() relies on.274274+ */275275+ subl $PARAM_SIZE, %esp276276+ movl %esp, %esi277277+ movl $PAGE_SIZE, BP_kernel_alignment(%esi)278278+ movl $_end - 1b, BP_init_size(%esi)279279+ subl $startup_32 - 1b, BP_init_size(%esi)273280274281 /* Disable paging */275282 movl %cr0, %eax···305294306295 movl 8(%ebp), %ecx // image_handle307296 movl 12(%ebp), %edx // sys_table308308- xorl %esi, %esi309309- jmp efi32_entry // pass %ecx, %edx, %esi297297+ jmp efi32_entry // pass %ecx, %edx310298 // no other registers remain live3112993123002: popl %edi // restore callee-save registers
···1313/*1414 * Defines x86 CPU feature bits1515 */1616-#define NCAPINTS 21 /* N 32-bit words worth of info */1616+#define NCAPINTS 22 /* N 32-bit words worth of info */1717#define NBUGINTS 2 /* N 32-bit bug flags */18181919/*···458458#define X86_FEATURE_SBPB (20*32+27) /* "" Selective Branch Prediction Barrier */459459#define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */460460#define X86_FEATURE_SRSO_NO (20*32+29) /* "" CPU is not affected by SRSO */461461+462462+/*463463+ * Extended auxiliary flags: Linux defined - for features scattered in various464464+ * CPUID levels like 0x80000022, etc.465465+ *466466+ * Reuse free bits when adding new feature flags!467467+ */468468+#define X86_FEATURE_AMD_LBR_PMC_FREEZE (21*32+ 0) /* AMD LBR and PMC Freeze */461469462470/*463471 * BUG word(s)
···262262.Lskip_rsb_\@:263263.endm264264265265+/*266266+ * The CALL to srso_alias_untrain_ret() must be patched in directly at267267+ * the spot where untraining must be done, ie., srso_alias_untrain_ret()268268+ * must be the target of a CALL instruction instead of indirectly269269+ * jumping to a wrapper which then calls it. Therefore, this macro is270270+ * called outside of __UNTRAIN_RET below, for the time being, before the271271+ * kernel can support nested alternatives with arbitrary nesting.272272+ */273273+.macro CALL_UNTRAIN_RET265274#if defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO)266266-#define CALL_UNTRAIN_RET "call entry_untrain_ret"267267-#else268268-#define CALL_UNTRAIN_RET ""275275+ ALTERNATIVE_2 "", "call entry_untrain_ret", X86_FEATURE_UNRET, \276276+ "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS269277#endif278278+.endm270279271280/*272281 * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the···291282.macro __UNTRAIN_RET ibpb_feature, call_depth_insns292283#if defined(CONFIG_MITIGATION_RETHUNK) || defined(CONFIG_MITIGATION_IBPB_ENTRY)293284 VALIDATE_UNRET_END294294- ALTERNATIVE_3 "", \295295- CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \285285+ CALL_UNTRAIN_RET286286+ ALTERNATIVE_2 "", \296287 "call entry_ibpb", \ibpb_feature, \297288 __stringify(\call_depth_insns), X86_FEATURE_CALL_DEPTH298289#endif···350341#else351342static inline void retbleed_return_thunk(void) {}352343#endif344344+345345+extern void srso_alias_untrain_ret(void);353346354347#ifdef CONFIG_MITIGATION_SRSO355348extern void srso_return_thunk(void);
···22/*33 * EISA specific code44 */55+#include <linux/cc_platform.h>56#include <linux/ioport.h>67#include <linux/eisa.h>78#include <linux/io.h>···1312{1413 void __iomem *p;15141616- if (xen_pv_domain() && !xen_initial_domain())1515+ if ((xen_pv_domain() && !xen_initial_domain()) || cc_platform_has(CC_ATTR_GUEST_SEV_SNP))1716 return 0;18171918 p = ioremap(0x0FFFD9, 4);
+14-10
arch/x86/kernel/nmi.c
···580580581581static char *nmi_check_stall_msg[] = {582582/* */583583-/* +--------- nsp->idt_seq_snap & 0x1: CPU is in NMI handler. */583583+/* +--------- nmi_seq & 0x1: CPU is currently in NMI handler. */584584/* | +------ cpu_is_offline(cpu) */585585/* | | +--- nsp->idt_calls_snap != atomic_long_read(&nsp->idt_calls): */586586/* | | | NMI handler has been invoked. */···628628 nmi_seq = READ_ONCE(nsp->idt_nmi_seq);629629 if (nsp->idt_nmi_seq_snap + 1 == nmi_seq && (nmi_seq & 0x1)) {630630 msgp = "CPU entered NMI handler function, but has not exited";631631- } else if ((nsp->idt_nmi_seq_snap & 0x1) != (nmi_seq & 0x1)) {632632- msgp = "CPU is handling NMIs";633633- } else {634634- idx = ((nsp->idt_seq_snap & 0x1) << 2) |631631+ } else if (nsp->idt_nmi_seq_snap == nmi_seq ||632632+ nsp->idt_nmi_seq_snap + 1 == nmi_seq) {633633+ idx = ((nmi_seq & 0x1) << 2) |635634 (cpu_is_offline(cpu) << 1) |636635 (nsp->idt_calls_snap != atomic_long_read(&nsp->idt_calls));637636 msgp = nmi_check_stall_msg[idx];638637 if (nsp->idt_ignored_snap != READ_ONCE(nsp->idt_ignored) && (idx & 0x1))639638 modp = ", but OK because ignore_nmis was set";640640- if (nmi_seq & 0x1)641641- msghp = " (CPU currently in NMI handler function)";642642- else if (nsp->idt_nmi_seq_snap + 1 == nmi_seq)639639+ if (nsp->idt_nmi_seq_snap + 1 == nmi_seq)643640 msghp = " (CPU exited one NMI handler function)";641641+ else if (nmi_seq & 0x1)642642+ msghp = " (CPU currently in NMI handler function)";643643+ else644644+ msghp = " (CPU was never in an NMI handler function)";645645+ } else {646646+ msgp = "CPU is handling NMIs";644647 }645645- pr_alert("%s: CPU %d: %s%s%s, last activity: %lu jiffies ago.\n",646646- __func__, cpu, msgp, modp, msghp, j - READ_ONCE(nsp->recv_jiffies));648648+ pr_alert("%s: CPU %d: %s%s%s\n", __func__, cpu, msgp, modp, msghp);649649+ pr_alert("%s: last activity: %lu jiffies ago.\n",650650+ __func__, j - READ_ONCE(nsp->recv_jiffies));647651 }648652}649653
-10
arch/x86/kernel/probe_roms.c
···203203 unsigned char c;204204 int i;205205206206- /*207207- * The ROM memory range is not part of the e820 table and is therefore not208208- * pre-validated by BIOS. The kernel page table maps the ROM region as encrypted209209- * memory, and SNP requires encrypted memory to be validated before access.210210- * Do that here.211211- */212212- snp_prep_memory(video_rom_resource.start,213213- ((system_rom_resource.end + 1) - video_rom_resource.start),214214- SNP_PAGE_STATE_PRIVATE);215215-216206 /* video rom */217207 upper = adapter_rom_resources[0].start;218208 for (start = video_rom_resource.start; start < upper; start += 2048) {
+1-2
arch/x86/kernel/setup.c
···99#include <linux/console.h>1010#include <linux/crash_dump.h>1111#include <linux/dma-map-ops.h>1212-#include <linux/dmi.h>1312#include <linux/efi.h>1413#include <linux/ima.h>1514#include <linux/init_ohci1394_dma.h>···901902 efi_init();902903903904 reserve_ibft_region();904904- dmi_setup();905905+ x86_init.resources.dmi_setup();905906906907 /*907908 * VMware detection requires dmi to be available, so this
+12-15
arch/x86/kernel/sev.c
···2323#include <linux/platform_device.h>2424#include <linux/io.h>2525#include <linux/psp-sev.h>2626+#include <linux/dmi.h>2627#include <uapi/linux/sev-guest.h>27282829#include <asm/init.h>···794793795794 /* Ask hypervisor to mark the memory pages shared in the RMP table. */796795 early_set_pages_state(vaddr, paddr, npages, SNP_PAGE_STATE_SHARED);797797-}798798-799799-void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op)800800-{801801- unsigned long vaddr, npages;802802-803803- vaddr = (unsigned long)__va(paddr);804804- npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;805805-806806- if (op == SNP_PAGE_STATE_PRIVATE)807807- early_snp_set_memory_private(vaddr, paddr, npages);808808- else if (op == SNP_PAGE_STATE_SHARED)809809- early_snp_set_memory_shared(vaddr, paddr, npages);810810- else811811- WARN(1, "invalid memory op %d\n", op);812796}813797814798static unsigned long __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,···21202134void __head __noreturn snp_abort(void)21212135{21222136 sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);21372137+}21382138+21392139+/*21402140+ * SEV-SNP guests should only execute dmi_setup() if EFI_CONFIG_TABLES are21412141+ * enabled, as the alternative (fallback) logic for DMI probing in the legacy21422142+ * ROM region can cause a crash since this region is not pre-validated.21432143+ */21442144+void __init snp_dmi_setup(void)21452145+{21462146+ if (efi_enabled(EFI_CONFIG_TABLES))21472147+ dmi_setup();21232148}2124214921252150static void dump_cpuid_table(void)
···163163 lfence164164 jmp srso_alias_return_thunk165165SYM_FUNC_END(srso_alias_untrain_ret)166166+__EXPORT_THUNK(srso_alias_untrain_ret)166167 .popsection167168168169 .pushsection .text..__x86.rethunk_safe···225224SYM_CODE_END(srso_return_thunk)226225227226#define JMP_SRSO_UNTRAIN_RET "jmp srso_untrain_ret"228228-#define JMP_SRSO_ALIAS_UNTRAIN_RET "jmp srso_alias_untrain_ret"229227#else /* !CONFIG_MITIGATION_SRSO */228228+/* Dummy for the alternative in CALL_UNTRAIN_RET. */229229+SYM_CODE_START(srso_alias_untrain_ret)230230+ RET231231+SYM_FUNC_END(srso_alias_untrain_ret)230232#define JMP_SRSO_UNTRAIN_RET "ud2"231231-#define JMP_SRSO_ALIAS_UNTRAIN_RET "ud2"232233#endif /* CONFIG_MITIGATION_SRSO */233234234235#ifdef CONFIG_MITIGATION_UNRET_ENTRY···322319#if defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO)323320324321SYM_FUNC_START(entry_untrain_ret)325325- ALTERNATIVE_2 JMP_RETBLEED_UNTRAIN_RET, \326326- JMP_SRSO_UNTRAIN_RET, X86_FEATURE_SRSO, \327327- JMP_SRSO_ALIAS_UNTRAIN_RET, X86_FEATURE_SRSO_ALIAS322322+ ALTERNATIVE JMP_RETBLEED_UNTRAIN_RET, JMP_SRSO_UNTRAIN_RET, X86_FEATURE_SRSO328323SYM_FUNC_END(entry_untrain_ret)329324__EXPORT_THUNK(entry_untrain_ret)330325
+5-18
arch/x86/mm/ident_map.c
···2626 for (; addr < end; addr = next) {2727 pud_t *pud = pud_page + pud_index(addr);2828 pmd_t *pmd;2929- bool use_gbpage;30293130 next = (addr & PUD_MASK) + PUD_SIZE;3231 if (next > end)3332 next = end;34333535- /* if this is already a gbpage, this portion is already mapped */3636- if (pud_leaf(*pud))3737- continue;3838-3939- /* Is using a gbpage allowed? */4040- use_gbpage = info->direct_gbpages;4141-4242- /* Don't use gbpage if it maps more than the requested region. */4343- /* at the begining: */4444- use_gbpage &= ((addr & ~PUD_MASK) == 0);4545- /* ... or at the end: */4646- use_gbpage &= ((next & ~PUD_MASK) == 0);4747-4848- /* Never overwrite existing mappings */4949- use_gbpage &= !pud_present(*pud);5050-5151- if (use_gbpage) {3434+ if (info->direct_gbpages) {5235 pud_t pudval;53363737+ if (pud_present(*pud))3838+ continue;3939+4040+ addr &= PUD_MASK;5441 pudval = __pud((addr - info->offset) | info->page_flag);5542 set_pud(pud, pudval);5643 continue;
+18
arch/x86/mm/mem_encrypt_amd.c
···492492 */493493 if (sev_status & MSR_AMD64_SEV_ENABLED)494494 ia32_disable();495495+496496+ /*497497+ * Override init functions that scan the ROM region in SEV-SNP guests,498498+ * as this memory is not pre-validated and would thus cause a crash.499499+ */500500+ if (sev_status & MSR_AMD64_SEV_SNP_ENABLED) {501501+ x86_init.mpparse.find_mptable = x86_init_noop;502502+ x86_init.pci.init_irq = x86_init_noop;503503+ x86_init.resources.probe_roms = x86_init_noop;504504+505505+ /*506506+ * DMI setup behavior for SEV-SNP guests depends on507507+ * efi_enabled(EFI_CONFIG_TABLES), which hasn't been508508+ * parsed yet. snp_dmi_setup() will run after that509509+ * parsing has happened.510510+ */511511+ x86_init.resources.dmi_setup = snp_dmi_setup;512512+ }495513}496514497515void __init mem_encrypt_free_decrypted_mem(void)
···726726 * which can be mixed are set in each bio and mark @rq as mixed727727 * merged.728728 */729729-void blk_rq_set_mixed_merge(struct request *rq)729729+static void blk_rq_set_mixed_merge(struct request *rq)730730{731731 blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;732732 struct bio *bio;
+2-7
block/blk-mq.c
···770770 /*771771 * Partial zone append completions cannot be supported as the772772 * BIO fragments may end up not being written sequentially.773773- * For such case, force the completed nbytes to be equal to774774- * the BIO size so that bio_advance() sets the BIO remaining775775- * size to 0 and we end up calling bio_endio() before returning.776773 */777777- if (bio->bi_iter.bi_size != nbytes) {774774+ if (bio->bi_iter.bi_size != nbytes)778775 bio->bi_status = BLK_STS_IOERR;779779- nbytes = bio->bi_iter.bi_size;780780- } else {776776+ else781777 bio->bi_iter.bi_sector = rq->__sector;782782- }783778 }784779785780 bio_advance(bio, nbytes);
···115115 * Sign the specified data blob using the private key specified by params->key.116116 * The signature is wrapped in an encoding if params->encoding is specified117117 * (eg. "pkcs1"). If the encoding needs to know the digest type, this can be118118- * passed through params->hash_algo (eg. "sha512").118118+ * passed through params->hash_algo (eg. "sha1").119119 *120120 * Returns the length of the data placed in the signature buffer or an error.121121 */
+8
crypto/asymmetric_keys/x509_cert_parser.c
···198198 default:199199 return -ENOPKG; /* Unsupported combination */200200201201+ case OID_sha1WithRSAEncryption:202202+ ctx->cert->sig->hash_algo = "sha1";203203+ goto rsa_pkcs1;204204+201205 case OID_sha256WithRSAEncryption:202206 ctx->cert->sig->hash_algo = "sha256";203207 goto rsa_pkcs1;···217213 case OID_sha224WithRSAEncryption:218214 ctx->cert->sig->hash_algo = "sha224";219215 goto rsa_pkcs1;216216+217217+ case OID_id_ecdsa_with_sha1:218218+ ctx->cert->sig->hash_algo = "sha1";219219+ goto ecdsa;220220221221 case OID_id_rsassa_pkcs1_v1_5_with_sha3_256:222222 ctx->cert->sig->hash_algo = "sha3-256";
···550550 ACPI_FREE(buffer.pointer);551551552552 buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;553553- acpi_evaluate_object(obj_handle, NULL, NULL, &buffer);554554-553553+ status = acpi_evaluate_object(obj_handle, NULL, NULL, &buffer);554554+ if (ACPI_FAILURE(status)) {555555+ acpi_os_printf("Could Not evaluate object %p\n",556556+ obj_handle);557557+ return (AE_OK);558558+ }555559 /*556560 * Since this is a field unit, surround the output in braces557561 */
···712712 ehc->saved_ncq_enabled |= 1 << devno;713713714714 /* If we are resuming, wake up the device */715715- if (ap->pflags & ATA_PFLAG_RESUMING)715715+ if (ap->pflags & ATA_PFLAG_RESUMING) {716716+ dev->flags |= ATA_DFLAG_RESUMING;716717 ehc->i.dev_action[devno] |= ATA_EH_SET_ACTIVE;718718+ }717719 }718720 }719721···31713169 return 0;3172317031733171 err:31723172+ dev->flags &= ~ATA_DFLAG_RESUMING;31743173 *r_failed_dev = dev;31753174 return rc;31763175}
+9
drivers/ata/libata-scsi.c
···47304730 struct ata_link *link;47314731 struct ata_device *dev;47324732 unsigned long flags;47334733+ bool do_resume;47334734 int ret = 0;4734473547354736 mutex_lock(&ap->scsi_scan_mutex);···47524751 if (scsi_device_get(sdev))47534752 continue;4754475347544754+ do_resume = dev->flags & ATA_DFLAG_RESUMING;47554755+47554756 spin_unlock_irqrestore(ap->lock, flags);47574757+ if (do_resume) {47584758+ ret = scsi_resume_device(sdev);47594759+ if (ret == -EWOULDBLOCK)47604760+ goto unlock;47614761+ dev->flags &= ~ATA_DFLAG_RESUMING;47624762+ }47564763 ret = scsi_rescan_device(sdev);47574764 scsi_device_put(sdev);47584765 spin_lock_irqsave(ap->lock, flags);
+7-3
drivers/crypto/intel/iaa/iaa_crypto_main.c
···806806 return -EINVAL;807807808808 cpus_per_iaa = (nr_nodes * nr_cpus_per_node) / nr_iaa;809809+ if (!cpus_per_iaa)810810+ cpus_per_iaa = 1;809811out:810812 return 0;811813}···823821 }824822 }825823826826- if (nr_iaa)824824+ if (nr_iaa) {827825 cpus_per_iaa = (nr_nodes * nr_cpus_per_node) / nr_iaa;828828- else829829- cpus_per_iaa = 0;826826+ if (!cpus_per_iaa)827827+ cpus_per_iaa = 1;828828+ } else829829+ cpus_per_iaa = 1;830830}831831832832static int wq_table_add_wqs(int iaa, int cpu)
-13
drivers/cxl/Kconfig
···144144 If unsure, or if this kernel is meant for production environments,145145 say N.146146147147-config CXL_PMU148148- tristate "CXL Performance Monitoring Unit"149149- default CXL_BUS150150- depends on PERF_EVENTS151151- help152152- Support performance monitoring as defined in CXL rev 3.0153153- section 13.2: Performance Monitoring. CXL components may have154154- one or more CXL Performance Monitoring Units (CPMUs).155155-156156- Say 'y/m' to enable a driver that will attach to performance157157- monitoring units and provide standard perf based interfaces.158158-159159- If unsure say 'm'.160147endif
+3-3
drivers/dma-buf/st-dma-fence-chain.c
···8484 return -ENOMEM;85858686 chain = mock_chain(NULL, f, 1);8787- if (!chain)8787+ if (chain)8888+ dma_fence_enable_sw_signaling(chain);8989+ else8890 err = -ENOMEM;8989-9090- dma_fence_enable_sw_signaling(chain);91919292 dma_fence_signal(f);9393 dma_fence_put(f);
···10831083 return 0;10841084}1085108510861086+static inline char *make_irq_label(const char *orig)10871087+{10881088+ return kstrdup_and_replace(orig, '/', ':', GFP_KERNEL);10891089+}10901090+10911091+static inline void free_irq_label(const char *label)10921092+{10931093+ kfree(label);10941094+}10951095+10861096static void edge_detector_stop(struct line *line)10871097{10881098 if (line->irq) {10891089- free_irq(line->irq, line);10991099+ free_irq_label(free_irq(line->irq, line));10901100 line->irq = 0;10911101 }10921102···11201110 unsigned long irqflags = 0;11211111 u64 eflags;11221112 int irq, ret;11131113+ char *label;1123111411241115 eflags = edflags & GPIO_V2_LINE_EDGE_FLAGS;11251116 if (eflags && !kfifo_initialized(&line->req->events)) {···11571146 IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;11581147 irqflags |= IRQF_ONESHOT;1159114811491149+ label = make_irq_label(line->req->label);11501150+ if (!label)11511151+ return -ENOMEM;11521152+11601153 /* Request a thread to read the events */11611154 ret = request_threaded_irq(irq, edge_irq_handler, edge_irq_thread,11621162- irqflags, line->req->label, line);11631163- if (ret)11551155+ irqflags, label, line);11561156+ if (ret) {11571157+ free_irq_label(label);11641158 return ret;11591159+ }1165116011661161 line->irq = irq;11671162 return 0;···19901973 blocking_notifier_chain_unregister(&le->gdev->device_notifier,19911974 &le->device_unregistered_nb);19921975 if (le->irq)19931993- free_irq(le->irq, le);19761976+ free_irq_label(free_irq(le->irq, le));19941977 if (le->desc)19951978 gpiod_free(le->desc);19961979 kfree(le->label);···21312114 int fd;21322115 int ret;21332116 int irq, irqflags = 0;21172117+ char *label;2134211821352119 if (copy_from_user(&eventreq, ip, sizeof(eventreq)))21362120 return -EFAULT;···22162198 if (ret)22172199 goto out_free_le;2218220022012201+ label = make_irq_label(le->label);22022202+ if (!label) {22032203+ ret = -ENOMEM;22042204+ goto out_free_le;22052205+ }22062206+22192207 /* Request a 
thread to read the events */22202208 ret = request_threaded_irq(irq,22212209 lineevent_irq_handler,22222210 lineevent_irq_thread,22232211 irqflags,22242224- le->label,22122212+ label,22252213 le);22262226- if (ret)22142214+ if (ret) {22152215+ free_irq_label(label);22272216 goto out_free_le;22172217+ }2228221822292219 le->irq = irq;22302220
+18-14
drivers/gpio/gpiolib.c
···23972397}23982398EXPORT_SYMBOL_GPL(gpiochip_dup_line_label);2399239924002400+static inline const char *function_name_or_default(const char *con_id)24012401+{24022402+ return con_id ?: "(default)";24032403+}24042404+24002405/**24012406 * gpiochip_request_own_desc - Allow GPIO chip to request its own descriptor24022407 * @gc: GPIO chip···24302425 enum gpiod_flags dflags)24312426{24322427 struct gpio_desc *desc = gpiochip_get_desc(gc, hwnum);24282428+ const char *name = function_name_or_default(label);24332429 int ret;2434243024352431 if (IS_ERR(desc)) {24362436- chip_err(gc, "failed to get GPIO descriptor\n");24322432+ chip_err(gc, "failed to get GPIO %s descriptor\n", name);24372433 return desc;24382434 }24392435···2444243824452439 ret = gpiod_configure_flags(desc, label, lflags, dflags);24462440 if (ret) {24472447- chip_err(gc, "setup of own GPIO %s failed\n", label);24482441 gpiod_free_commit(desc);24422442+ chip_err(gc, "setup of own GPIO %s failed\n", name);24492443 return ERR_PTR(ret);24502444 }24512445···41594153 enum gpiod_flags *flags,41604154 unsigned long *lookupflags)41614155{41564156+ const char *name = function_name_or_default(con_id);41624157 struct gpio_desc *desc = ERR_PTR(-ENOENT);4163415841644159 if (is_of_node(fwnode)) {41654165- dev_dbg(consumer, "using DT '%pfw' for '%s' GPIO lookup\n",41664166- fwnode, con_id);41604160+ dev_dbg(consumer, "using DT '%pfw' for '%s' GPIO lookup\n", fwnode, name);41674161 desc = of_find_gpio(to_of_node(fwnode), con_id, idx, lookupflags);41684162 } else if (is_acpi_node(fwnode)) {41694169- dev_dbg(consumer, "using ACPI '%pfw' for '%s' GPIO lookup\n",41704170- fwnode, con_id);41634163+ dev_dbg(consumer, "using ACPI '%pfw' for '%s' GPIO lookup\n", fwnode, name);41714164 desc = acpi_find_gpio(fwnode, con_id, idx, flags, lookupflags);41724165 } else if (is_software_node(fwnode)) {41734173- dev_dbg(consumer, "using swnode '%pfw' for '%s' GPIO lookup\n",41744174- fwnode, con_id);41664166+ dev_dbg(consumer, "using 
swnode '%pfw' for '%s' GPIO lookup\n", fwnode, name);41754167 desc = swnode_find_gpio(fwnode, con_id, idx, lookupflags);41764168 }41774169···41854181 bool platform_lookup_allowed)41864182{41874183 unsigned long lookupflags = GPIO_LOOKUP_FLAGS_DEFAULT;41844184+ const char *name = function_name_or_default(con_id);41884185 /*41894186 * scoped_guard() is implemented as a for loop, meaning static41904187 * analyzers will complain about these two not being initialized.···42084203 }4209420442104205 if (IS_ERR(desc)) {42114211- dev_dbg(consumer, "No GPIO consumer %s found\n",42124212- con_id);42064206+ dev_dbg(consumer, "No GPIO consumer %s found\n", name);42134207 return desc;42144208 }42154209···42304226 *42314227 * FIXME: Make this more sane and safe.42324228 */42334233- dev_info(consumer,42344234- "nonexclusive access to GPIO for %s\n", con_id);42294229+ dev_info(consumer, "nonexclusive access to GPIO for %s\n", name);42354230 return desc;42364231 }4237423242384233 ret = gpiod_configure_flags(desc, con_id, lookupflags, flags);42394234 if (ret < 0) {42404240- dev_dbg(consumer, "setup of GPIO %s failed\n", con_id);42414235 gpiod_put(desc);42364236+ dev_dbg(consumer, "setup of GPIO %s failed\n", name);42424237 return ERR_PTR(ret);42434238 }42444239···43534350int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,43544351 unsigned long lflags, enum gpiod_flags dflags)43554352{43534353+ const char *name = function_name_or_default(con_id);43564354 int ret;4357435543584356 if (lflags & GPIO_ACTIVE_LOW)···4397439343984394 /* No particular flag request, return here... */43994395 if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {44004400- gpiod_dbg(desc, "no flags found for %s\n", con_id);43964396+ gpiod_dbg(desc, "no flags found for GPIO %s\n", name);44014397 return 0;44024398 }44034399
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
···45394539 if (r)45404540 goto unprepare;4541454145424542+ flush_delayed_work(&adev->gfx.gfx_off_delay_work);45434543+45424544 for (i = 0; i < adev->num_ip_blocks; i++) {45434545 if (!adev->ip_blocks[i].status.valid)45444546 continue;
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
···22372237{22382238 switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {22392239 case IP_VERSION(4, 0, 5):22402240+ case IP_VERSION(4, 0, 6):22402241 if (amdgpu_umsch_mm & 0x1) {22412242 amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);22422243 adev->enable_umsch_mm = true;
+29-17
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
···524524{525525 struct amdgpu_ring *ring = file_inode(f)->i_private;526526 volatile u32 *mqd;527527- int r;527527+ u32 *kbuf;528528+ int r, i;528529 uint32_t value, result;529530530531 if (*pos & 3 || size & 3)531532 return -EINVAL;532533533533- result = 0;534534+ kbuf = kmalloc(ring->mqd_size, GFP_KERNEL);535535+ if (!kbuf)536536+ return -ENOMEM;534537535538 r = amdgpu_bo_reserve(ring->mqd_obj, false);536539 if (unlikely(r != 0))537537- return r;540540+ goto err_free;538541539542 r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd);540540- if (r) {541541- amdgpu_bo_unreserve(ring->mqd_obj);542542- return r;543543- }543543+ if (r)544544+ goto err_unreserve;544545546546+ /*547547+ * Copy to local buffer to avoid put_user(), which might fault548548+ * and acquire mmap_sem, under reservation_ww_class_mutex.549549+ */550550+ for (i = 0; i < ring->mqd_size/sizeof(u32); i++)551551+ kbuf[i] = mqd[i];552552+553553+ amdgpu_bo_kunmap(ring->mqd_obj);554554+ amdgpu_bo_unreserve(ring->mqd_obj);555555+556556+ result = 0;545557 while (size) {546558 if (*pos >= ring->mqd_size)547547- goto done;559559+ break;548560549549- value = mqd[*pos/4];561561+ value = kbuf[*pos/4];550562 r = put_user(value, (uint32_t *)buf);551563 if (r)552552- goto done;564564+ goto err_free;553565 buf += 4;554566 result += 4;555567 size -= 4;556568 *pos += 4;557569 }558570559559-done:560560- amdgpu_bo_kunmap(ring->mqd_obj);561561- mqd = NULL;562562- amdgpu_bo_unreserve(ring->mqd_obj);563563- if (r)564564- return r;565565-571571+ kfree(kbuf);566572 return result;573573+574574+err_unreserve:575575+ amdgpu_bo_unreserve(ring->mqd_obj);576576+err_free:577577+ kfree(kbuf);578578+ return r;567579}568580569581static const struct file_operations amdgpu_debugfs_mqd_fops = {
···396396 struct amdgpu_vpe *vpe = &adev->vpe;397397 int ret;398398399399+ /* Power on VPE */400400+ ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE,401401+ AMD_PG_STATE_UNGATE);402402+ if (ret)403403+ return ret;404404+399405 ret = vpe_load_microcode(vpe);400406 if (ret)401407 return ret;
···2323# Makefile for the 'controller' sub-component of DAL.2424# It provides the control and status of HW CRTC block.25252626-CFLAGS_$(AMDDALPATH)/dc/dce110/dce110_resource.o = $(call cc-disable-warning, override-init)2626+CFLAGS_$(AMDDALPATH)/dc/dce110/dce110_resource.o = -Wno-override-init27272828DCE110 = dce110_timing_generator.o \2929dce110_compressor.o dce110_opp_regamma_v.o \
+1-1
drivers/gpu/drm/amd/display/dc/dce112/Makefile
···2323# Makefile for the 'controller' sub-component of DAL.2424# It provides the control and status of HW CRTC block.25252626-CFLAGS_$(AMDDALPATH)/dc/dce112/dce112_resource.o = $(call cc-disable-warning, override-init)2626+CFLAGS_$(AMDDALPATH)/dc/dce112/dce112_resource.o = -Wno-override-init27272828DCE112 = dce112_compressor.o2929
+1-1
drivers/gpu/drm/amd/display/dc/dce120/Makefile
···2424# It provides the control and status of HW CRTC block.252526262727-CFLAGS_$(AMDDALPATH)/dc/dce120/dce120_resource.o = $(call cc-disable-warning, override-init)2727+CFLAGS_$(AMDDALPATH)/dc/dce120/dce120_resource.o = -Wno-override-init28282929DCE120 = dce120_timing_generator.o3030
+1-1
drivers/gpu/drm/amd/display/dc/dce60/Makefile
···2323# Makefile for the 'controller' sub-component of DAL.2424# It provides the control and status of HW CRTC block.25252626-CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = $(call cc-disable-warning, override-init)2626+CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = -Wno-override-init27272828DCE60 = dce60_timing_generator.o dce60_hw_sequencer.o \2929 dce60_resource.o
+1-1
drivers/gpu/drm/amd/display/dc/dce80/Makefile
···2323# Makefile for the 'controller' sub-component of DAL.2424# It provides the control and status of HW CRTC block.25252626-CFLAGS_$(AMDDALPATH)/dc/dce80/dce80_resource.o = $(call cc-disable-warning, override-init)2626+CFLAGS_$(AMDDALPATH)/dc/dce80/dce80_resource.o = -Wno-override-init27272828DCE80 = dce80_timing_generator.o2929
+32-22
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
···4444#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))454546464747+void mpc3_mpc_init(struct mpc *mpc)4848+{4949+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);5050+ int opp_id;5151+5252+ mpc1_mpc_init(mpc);5353+5454+ for (opp_id = 0; opp_id < MAX_OPP; opp_id++) {5555+ if (REG(MUX[opp_id]))5656+ /* disable mpc out rate and flow control */5757+ REG_UPDATE_2(MUX[opp_id], MPC_OUT_RATE_CONTROL_DISABLE,5858+ 1, MPC_OUT_FLOW_CONTROL_COUNT, 0);5959+ }6060+}6161+6262+void mpc3_mpc_init_single_inst(struct mpc *mpc, unsigned int mpcc_id)6363+{6464+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);6565+6666+ mpc1_mpc_init_single_inst(mpc, mpcc_id);6767+6868+ /* assuming mpc out mux is connected to opp with the same index at this6969+ * point in time (e.g. transitioning from vbios to driver)7070+ */7171+ if (mpcc_id < MAX_OPP && REG(MUX[mpcc_id]))7272+ /* disable mpc out rate and flow control */7373+ REG_UPDATE_2(MUX[mpcc_id], MPC_OUT_RATE_CONTROL_DISABLE,7474+ 1, MPC_OUT_FLOW_CONTROL_COUNT, 0);7575+}7676+4777bool mpc3_is_dwb_idle(4878 struct mpc *mpc,4979 int dwb_id)···1087810979 REG_SET(DWB_MUX[dwb_id], 0,11080 MPC_DWB0_MUX, 0xf);111111-}112112-113113-void mpc3_set_out_rate_control(114114- struct mpc *mpc,115115- int opp_id,116116- bool enable,117117- bool rate_2x_mode,118118- struct mpc_dwb_flow_control *flow_control)119119-{120120- struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);121121-122122- REG_UPDATE_2(MUX[opp_id],123123- MPC_OUT_RATE_CONTROL_DISABLE, !enable,124124- MPC_OUT_RATE_CONTROL, rate_2x_mode);125125-126126- if (flow_control)127127- REG_UPDATE_2(MUX[opp_id],128128- MPC_OUT_FLOW_CONTROL_MODE, flow_control->flow_ctrl_mode,129129- MPC_OUT_FLOW_CONTROL_COUNT, flow_control->flow_ctrl_cnt1);13081}1318213283enum dc_lut_mode mpc3_get_ogam_current(struct mpc *mpc, int mpcc_id)···15011490 .read_mpcc_state = mpc3_read_mpcc_state,15021491 .insert_plane = mpc1_insert_plane,15031492 .remove_mpcc = mpc1_remove_mpcc,15041504- .mpc_init = mpc1_mpc_init,15051505- 
.mpc_init_single_inst = mpc1_mpc_init_single_inst,14931493+ .mpc_init = mpc3_mpc_init,14941494+ .mpc_init_single_inst = mpc3_mpc_init_single_inst,15061495 .update_blending = mpc2_update_blending,15071496 .cursor_lock = mpc1_cursor_lock,15081497 .get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp,···15191508 .set_dwb_mux = mpc3_set_dwb_mux,15201509 .disable_dwb_mux = mpc3_disable_dwb_mux,15211510 .is_dwb_idle = mpc3_is_dwb_idle,15221522- .set_out_rate_control = mpc3_set_out_rate_control,15231511 .set_gamut_remap = mpc3_set_gamut_remap,15241512 .program_shaper = mpc3_program_shaper,15251513 .acquire_rmu = mpcc3_acquire_rmu,
+7-7
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
···10071007 int num_mpcc,10081008 int num_rmu);1009100910101010+void mpc3_mpc_init(10111011+ struct mpc *mpc);10121012+10131013+void mpc3_mpc_init_single_inst(10141014+ struct mpc *mpc,10151015+ unsigned int mpcc_id);10161016+10101017bool mpc3_program_shaper(10111018 struct mpc *mpc,10121019 const struct pwl_params *params,···10841077bool mpc3_is_dwb_idle(10851078 struct mpc *mpc,10861079 int dwb_id);10871087-10881088-void mpc3_set_out_rate_control(10891089- struct mpc *mpc,10901090- int opp_id,10911091- bool enable,10921092- bool rate_2x_mode,10931093- struct mpc_dwb_flow_control *flow_control);1094108010951081void mpc3_power_on_ogam_lut(10961082 struct mpc *mpc, int mpcc_id,
···147147 }148148149149 /* VSC packet set to 4 for PSR-SU, or 2 for PSR1 */150150- if (stream->link->psr_settings.psr_feature_enabled) {151151- if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)152152- vsc_packet_revision = vsc_packet_rev4;153153- else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)154154- vsc_packet_revision = vsc_packet_rev2;155155- }156156-157157- if (stream->link->replay_settings.config.replay_supported)150150+ if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)158151 vsc_packet_revision = vsc_packet_rev4;152152+ else if (stream->link->replay_settings.config.replay_supported)153153+ vsc_packet_revision = vsc_packet_rev4;154154+ else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)155155+ vsc_packet_revision = vsc_packet_rev2;159156160157 /* Update to revision 5 for extended colorimetry support */161158 if (stream->use_vsc_sdp_for_colorimetry)
···5454#define PPSMC_MSG_TestMessage 0x01 ///< To check if PMFW is alive and responding. Requirement specified by PMFW team5555#define PPSMC_MSG_GetPmfwVersion 0x02 ///< Get PMFW version5656#define PPSMC_MSG_GetDriverIfVersion 0x03 ///< Get PMFW_DRIVER_IF version5757-#define PPSMC_MSG_SPARE0 0x04 ///< SPARE5858-#define PPSMC_MSG_SPARE1 0x05 ///< SPARE5959-#define PPSMC_MSG_PowerDownVcn 0x06 ///< Power down VCN6060-#define PPSMC_MSG_PowerUpVcn 0x07 ///< Power up VCN; VCN is power gated by default6161-#define PPSMC_MSG_SetHardMinVcn 0x08 ///< For wireless display5757+#define PPSMC_MSG_PowerDownVcn1 0x04 ///< Power down VCN15858+#define PPSMC_MSG_PowerUpVcn1 0x05 ///< Power up VCN1; VCN1 is power gated by default5959+#define PPSMC_MSG_PowerDownVcn0 0x06 ///< Power down VCN06060+#define PPSMC_MSG_PowerUpVcn0 0x07 ///< Power up VCN0; VCN0 is power gated by default6161+#define PPSMC_MSG_SetHardMinVcn0 0x08 ///< For wireless display6262#define PPSMC_MSG_SetSoftMinGfxclk 0x09 ///< Set SoftMin for GFXCLK, argument is frequency in MHz6363-#define PPSMC_MSG_SPARE2 0x0A ///< SPARE6464-#define PPSMC_MSG_SPARE3 0x0B ///< SPARE6363+#define PPSMC_MSG_SetHardMinVcn1 0x0A ///< For wireless display6464+#define PPSMC_MSG_SetSoftMinVcn1 0x0B ///< Set soft min for VCN1 clocks (VCLK1 and DCLK1)6565#define PPSMC_MSG_PrepareMp1ForUnload 0x0C ///< Prepare PMFW for GFX driver unload6666#define PPSMC_MSG_SetDriverDramAddrHigh 0x0D ///< Set high 32 bits of DRAM address for Driver table transfer6767#define PPSMC_MSG_SetDriverDramAddrLow 0x0E ///< Set low 32 bits of DRAM address for Driver table transfer···7171#define PPSMC_MSG_GetEnabledSmuFeatures 0x12 ///< Get enabled features in PMFW7272#define PPSMC_MSG_SetHardMinSocclkByFreq 0x13 ///< Set hard min for SOC CLK7373#define PPSMC_MSG_SetSoftMinFclk 0x14 ///< Set hard min for FCLK7474-#define PPSMC_MSG_SetSoftMinVcn 0x15 ///< Set soft min for VCN clocks (VCLK and DCLK)7474+#define PPSMC_MSG_SetSoftMinVcn0 0x15 ///< Set soft min for VCN0 clocks 
(VCLK0 and DCLK0)75757676#define PPSMC_MSG_EnableGfxImu 0x16 ///< Enable GFX IMU7777···84848585#define PPSMC_MSG_SetSoftMaxSocclkByFreq 0x1D ///< Set soft max for SOC CLK8686#define PPSMC_MSG_SetSoftMaxFclkByFreq 0x1E ///< Set soft max for FCLK8787-#define PPSMC_MSG_SetSoftMaxVcn 0x1F ///< Set soft max for VCN clocks (VCLK and DCLK)8787+#define PPSMC_MSG_SetSoftMaxVcn0 0x1F ///< Set soft max for VCN0 clocks (VCLK0 and DCLK0)8888#define PPSMC_MSG_spare_0x20 0x208989-#define PPSMC_MSG_PowerDownJpeg 0x21 ///< Power down Jpeg9090-#define PPSMC_MSG_PowerUpJpeg 0x22 ///< Power up Jpeg; VCN is power gated by default8989+#define PPSMC_MSG_PowerDownJpeg0 0x21 ///< Power down Jpeg of VCN09090+#define PPSMC_MSG_PowerUpJpeg0 0x22 ///< Power up Jpeg of VCN0; VCN0 is power gated by default91919292#define PPSMC_MSG_SetHardMinFclkByFreq 0x23 ///< Set hard min for FCLK9393#define PPSMC_MSG_SetSoftMinSocclkByFreq 0x24 ///< Set soft min for SOC CLK9494#define PPSMC_MSG_AllowZstates 0x25 ///< Inform PMFM of allowing Zstate entry, i.e. no Miracast activity9595-#define PPSMC_MSG_Reserved 0x26 ///< Not used9696-#define PPSMC_MSG_Reserved1 0x27 ///< Not used, previously PPSMC_MSG_RequestActiveWgp9797-#define PPSMC_MSG_Reserved2 0x28 ///< Not used, previously PPSMC_MSG_QueryActiveWgp9595+#define PPSMC_MSG_PowerDownJpeg1 0x26 ///< Power down Jpeg of VCN19696+#define PPSMC_MSG_PowerUpJpeg1 0x27 ///< Power up Jpeg of VCN1; VCN1 is power gated by default9797+#define PPSMC_MSG_SetSoftMaxVcn1 0x28 ///< Set soft max for VCN1 clocks (VCLK1 and DCLK1)9898#define PPSMC_MSG_PowerDownIspByTile 0x29 ///< ISP is power gated by default9999#define PPSMC_MSG_PowerUpIspByTile 0x2A ///< This message is used to power up ISP tiles and enable the ISP DPM100100#define PPSMC_MSG_SetHardMinIspiclkByFreq 0x2B ///< Set HardMin by frequency for ISPICLK
···19551955 * these devices we split the init OTP sequence into a deassert sequence and19561956 * the actual init OTP part.19571957 */19581958-static void fixup_mipi_sequences(struct drm_i915_private *i915,19591959- struct intel_panel *panel)19581958+static void vlv_fixup_mipi_sequences(struct drm_i915_private *i915,19591959+ struct intel_panel *panel)19601960{19611961 u8 *init_otp;19621962 int len;19631963-19641964- /* Limit this to VLV for now. */19651965- if (!IS_VALLEYVIEW(i915))19661966- return;1967196319681964 /* Limit this to v1 vid-mode sequences */19691965 if (panel->vbt.dsi.config->is_cmd_mode ||···19941998 init_otp[len - 1] = MIPI_SEQ_INIT_OTP;19951999 /* And make MIPI_MIPI_SEQ_INIT_OTP point to it */19962000 panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;20012001+}20022002+20032003+/*20042004+ * Some machines (eg. Lenovo 82TQ) appear to have broken20052005+ * VBT sequences:20062006+ * - INIT_OTP is not present at all20072007+ * - what should be in INIT_OTP is in DISPLAY_ON20082008+ * - what should be in DISPLAY_ON is in BACKLIGHT_ON20092009+ * (along with the actual backlight stuff)20102010+ *20112011+ * To make those work we simply swap DISPLAY_ON and INIT_OTP.20122012+ *20132013+ * TODO: Do we need to limit this to specific machines,20142014+ * or examine the contents of the sequences to20152015+ * avoid false positives?20162016+ */20172017+static void icl_fixup_mipi_sequences(struct drm_i915_private *i915,20182018+ struct intel_panel *panel)20192019+{20202020+ if (!panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] &&20212021+ panel->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON]) {20222022+ drm_dbg_kms(&i915->drm, "Broken VBT: Swapping INIT_OTP and DISPLAY_ON sequences\n");20232023+20242024+ swap(panel->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP],20252025+ panel->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON]);20262026+ }20272027+}20282028+20292029+static void fixup_mipi_sequences(struct drm_i915_private *i915,20302030+ struct intel_panel *panel)20312031+{20322032+ 
if (DISPLAY_VER(i915) >= 11)20332033+ icl_fixup_mipi_sequences(i915, panel);20342034+ else if (IS_VALLEYVIEW(i915))20352035+ vlv_fixup_mipi_sequences(i915, panel);19972036}1998203719992038static void···33813350bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_data *devdata)33823351{33833352 const struct child_device_config *child = &devdata->child;33533353+33543354+ if (!devdata)33553355+ return false;3384335633853357 if (!intel_bios_encoder_supports_dp(devdata) ||33863358 !intel_bios_encoder_supports_hdmi(devdata))
+1-3
drivers/gpu/drm/i915/display/intel_cursor.c
···3636{3737 struct drm_i915_private *dev_priv =3838 to_i915(plane_state->uapi.plane->dev);3939- const struct drm_framebuffer *fb = plane_state->hw.fb;4040- struct drm_i915_gem_object *obj = intel_fb_obj(fb);4139 u32 base;42404341 if (DISPLAY_INFO(dev_priv)->cursor_needs_physical)4444- base = i915_gem_object_get_dma_address(obj, 0);4242+ base = plane_state->phys_dma_addr;4543 else4644 base = intel_plane_ggtt_offset(plane_state);4745
···255255 return PTR_ERR(vma);256256257257 plane_state->ggtt_vma = vma;258258+259259+ /*260260+ * Pre-populate the dma address before we enter the vblank261261+ * evade critical section as i915_gem_object_get_dma_address()262262+ * will trigger might_sleep() even if it won't actually sleep,263263+ * which is the case when the fb has already been pinned.264264+ */265265+ if (phys_cursor)266266+ plane_state->phys_dma_addr =267267+ i915_gem_object_get_dma_address(intel_fb_obj(fb), 0);258268 } else {259269 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);260270
···187187 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;188188189189 /*190190- * TRANS_SET_CONTEXT_LATENCY with VRR enabled191191- * requires this chicken bit on ADL/DG2.190190+ * This bit seems to have two meanings depending on the platform:191191+ * TGL: generate VRR "safe window" for DSB vblank waits192192+ * ADL/DG2: make TRANS_SET_CONTEXT_LATENCY effective with VRR192193 */193193- if (DISPLAY_VER(dev_priv) == 13)194194+ if (IS_DISPLAY_VER(dev_priv, 12, 13))194195 intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),195196 0, PIPE_VBLANK_WITH_DELAY);196197
···22952295 if (HAS_4TILE(i915))22962296 caps |= INTEL_PLANE_CAP_TILING_4;2297229722982298+ if (!IS_ENABLED(I915) && !HAS_FLAT_CCS(i915))22992299+ return caps;23002300+22982301 if (skl_plane_has_rc_ccs(i915, pipe, plane_id)) {22992302 caps |= INTEL_PLANE_CAP_CCS_RC;23002303 if (DISPLAY_VER(i915) >= 12)
-3
drivers/gpu/drm/i915/gt/intel_engine_pm.c
···279279 intel_engine_park_heartbeat(engine);280280 intel_breadcrumbs_park(engine->breadcrumbs);281281282282- /* Must be reset upon idling, or we may miss the busy wakeup. */283283- GEM_BUG_ON(engine->sched_engine->queue_priority_hint != INT_MIN);284284-285282 if (engine->park)286283 engine->park(engine);287284
···32723272{32733273 cancel_timer(&engine->execlists.timer);32743274 cancel_timer(&engine->execlists.preempt);32753275+32763276+ /* Reset upon idling, or we may delay the busy wakeup. */32773277+ WRITE_ONCE(engine->sched_engine->queue_priority_hint, INT_MIN);32753278}3276327932773280static void add_to_engine(struct i915_request *rq)
···800800 goto out_cleanup_modeset2;801801802802 ret = intel_pxp_init(i915);803803- if (ret != -ENODEV)803803+ if (ret && ret != -ENODEV)804804 drm_dbg(&i915->drm, "pxp init failed with %d\n", ret);805805806806 ret = intel_display_driver_probe(i915);
···3434#include "gt/intel_engine.h"3535#include "gt/intel_engine_heartbeat.h"3636#include "gt/intel_gt.h"3737+#include "gt/intel_gt_pm.h"3738#include "gt/intel_gt_requests.h"3839#include "gt/intel_tlb.h"3940···104103105104static int __i915_vma_active(struct i915_active *ref)106105{107107- return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;106106+ struct i915_vma *vma = active_to_vma(ref);107107+108108+ if (!i915_vma_tryget(vma))109109+ return -ENOENT;110110+111111+ /*112112+ * Exclude global GTT VMA from holding a GT wakeref113113+ * while active, otherwise GPU never goes idle.114114+ */115115+ if (!i915_vma_is_ggtt(vma)) {116116+ /*117117+ * Since we and our _retire() counterpart can be118118+ * called asynchronously, storing a wakeref tracking119119+ * handle inside struct i915_vma is not safe, and120120+ * there is no other good place for that. Hence,121121+ * use untracked variants of intel_gt_pm_get/put().122122+ */123123+ intel_gt_pm_get_untracked(vma->vm->gt);124124+ }125125+126126+ return 0;108127}109128110129static void __i915_vma_retire(struct i915_active *ref)111130{112112- i915_vma_put(active_to_vma(ref));131131+ struct i915_vma *vma = active_to_vma(ref);132132+133133+ if (!i915_vma_is_ggtt(vma)) {134134+ /*135135+ * Since we can be called from atomic contexts,136136+ * use an async variant of intel_gt_pm_put().137137+ */138138+ intel_gt_pm_put_async_untracked(vma->vm->gt);139139+ }140140+141141+ i915_vma_put(vma);113142}114143115144static struct i915_vma *···14351404 struct i915_vma_work *work = NULL;14361405 struct dma_fence *moving = NULL;14371406 struct i915_vma_resource *vma_res = NULL;14381438- intel_wakeref_t wakeref = 0;14071407+ intel_wakeref_t wakeref;14391408 unsigned int bound;14401409 int err;14411410···14551424 if (err)14561425 return err;1457142614581458- if (flags & PIN_GLOBAL)14591459- wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);14271427+ /*14281428+ * In case of a global GTT, we must hold a runtime-pm 
wakeref14291429+ * while global PTEs are updated. In other cases, we hold14301430+ * the rpm reference while the VMA is active. Since runtime14311431+ * resume may require allocations, which are forbidden inside14321432+ * vm->mutex, get the first rpm wakeref outside of the mutex.14331433+ */14341434+ wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);1460143514611436 if (flags & vma->vm->bind_async_flags) {14621437 /* lock VM */···15981561 if (work)15991562 dma_fence_work_commit_imm(&work->base);16001563err_rpm:16011601- if (wakeref)16021602- intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);15641564+ intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);1603156516041566 if (moving)16051567 dma_fence_put(moving);
···7171 entity->guilty = guilty;7272 entity->num_sched_list = num_sched_list;7373 entity->priority = priority;7474+ /*7575+ * It's perfectly valid to initialize an entity without having a valid7676+ * scheduler attached. It's just not valid to use the scheduler before it7777+ * is initialized itself.7878+ */7479 entity->sched_list = num_sched_list > 1 ? sched_list : NULL;7580 RCU_INIT_POINTER(entity->last_scheduled, NULL);7681 RB_CLEAR_NODE(&entity->rb_tree_node);77827878- if (!sched_list[0]->sched_rq) {7979- /* Warn drivers not to do this and to fix their DRM8080- * calling order.8383+ if (num_sched_list && !sched_list[0]->sched_rq) {8484+ /* Since every entry covered by num_sched_list8585+ * should be non-NULL and therefore we warn drivers8686+ * not to do this and to fix their DRM calling order.8187 */8288 pr_warn("%s: called with uninitialized scheduler\n", __func__);8389 } else if (num_sched_list) {
···172172 -Ddrm_i915_gem_object=xe_bo \173173 -Ddrm_i915_private=xe_device174174175175-CFLAGS_i915-display/intel_fbdev.o = $(call cc-disable-warning, override-init)176176-CFLAGS_i915-display/intel_display_device.o = $(call cc-disable-warning, override-init)175175+CFLAGS_i915-display/intel_fbdev.o = -Wno-override-init176176+CFLAGS_i915-display/intel_display_device.o = -Wno-override-init177177178178# Rule to build SOC code shared with i915179179$(obj)/i915-soc/%.o: $(srctree)/drivers/gpu/drm/i915/soc/%.c FORCE
+9-50
drivers/gpu/drm/xe/xe_bo.c
···144144 .mem_type = XE_PL_TT,145145 };146146 *c += 1;147147-148148- if (bo->props.preferred_mem_type == XE_BO_PROPS_INVALID)149149- bo->props.preferred_mem_type = XE_PL_TT;150147 }151148}152149···178181 }179182 places[*c] = place;180183 *c += 1;181181-182182- if (bo->props.preferred_mem_type == XE_BO_PROPS_INVALID)183183- bo->props.preferred_mem_type = mem_type;184184}185185186186static void try_add_vram(struct xe_device *xe, struct xe_bo *bo,187187 u32 bo_flags, u32 *c)188188{189189- if (bo->props.preferred_gt == XE_GT1) {190190- if (bo_flags & XE_BO_CREATE_VRAM1_BIT)191191- add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);192192- if (bo_flags & XE_BO_CREATE_VRAM0_BIT)193193- add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c);194194- } else {195195- if (bo_flags & XE_BO_CREATE_VRAM0_BIT)196196- add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c);197197- if (bo_flags & XE_BO_CREATE_VRAM1_BIT)198198- add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);199199- }189189+ if (bo_flags & XE_BO_CREATE_VRAM0_BIT)190190+ add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c);191191+ if (bo_flags & XE_BO_CREATE_VRAM1_BIT)192192+ add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);200193}201194202195static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,···210223{211224 u32 c = 0;212225213213- bo->props.preferred_mem_type = XE_BO_PROPS_INVALID;214214-215215- /* The order of placements should indicate preferred location */216216-217217- if (bo->props.preferred_mem_class == DRM_XE_MEM_REGION_CLASS_SYSMEM) {218218- try_add_system(xe, bo, bo_flags, &c);219219- try_add_vram(xe, bo, bo_flags, &c);220220- } else {221221- try_add_vram(xe, bo, bo_flags, &c);222222- try_add_system(xe, bo, bo_flags, &c);223223- }226226+ try_add_vram(xe, bo, bo_flags, &c);227227+ try_add_system(xe, bo, bo_flags, &c);224228 try_add_stolen(xe, bo, bo_flags, &c);225229226230 if (!c)···11041126 }11051127}1106112811071107-static bool 
should_migrate_to_system(struct xe_bo *bo)11081108-{11091109- struct xe_device *xe = xe_bo_device(bo);11101110-11111111- return xe_device_in_fault_mode(xe) && bo->props.cpu_atomic;11121112-}11131113-11141129static vm_fault_t xe_gem_fault(struct vm_fault *vmf)11151130{11161131 struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;···11121141 struct xe_bo *bo = ttm_to_xe_bo(tbo);11131142 bool needs_rpm = bo->flags & XE_BO_CREATE_VRAM_MASK;11141143 vm_fault_t ret;11151115- int idx, r = 0;11441144+ int idx;1116114511171146 if (needs_rpm)11181147 xe_device_mem_access_get(xe);···11241153 if (drm_dev_enter(ddev, &idx)) {11251154 trace_xe_bo_cpu_fault(bo);1126115511271127- if (should_migrate_to_system(bo)) {11281128- r = xe_bo_migrate(bo, XE_PL_TT);11291129- if (r == -EBUSY || r == -ERESTARTSYS || r == -EINTR)11301130- ret = VM_FAULT_NOPAGE;11311131- else if (r)11321132- ret = VM_FAULT_SIGBUS;11331133- }11341134- if (!ret)11351135- ret = ttm_bo_vm_fault_reserved(vmf,11361136- vmf->vma->vm_page_prot,11371137- TTM_BO_VM_NUM_PREFAULT);11561156+ ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,11571157+ TTM_BO_VM_NUM_PREFAULT);11381158 drm_dev_exit(idx);11391159 } else {11401160 ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);···12531291 bo->flags = flags;12541292 bo->cpu_caching = cpu_caching;12551293 bo->ttm.base.funcs = &xe_gem_object_funcs;12561256- bo->props.preferred_mem_class = XE_BO_PROPS_INVALID;12571257- bo->props.preferred_gt = XE_BO_PROPS_INVALID;12581258- bo->props.preferred_mem_type = XE_BO_PROPS_INVALID;12591294 bo->ttm.priority = XE_BO_PRIORITY_NORMAL;12601295 INIT_LIST_HEAD(&bo->pinned_link);12611296#ifdef CONFIG_PROC_FS
-19
drivers/gpu/drm/xe/xe_bo_types.h
···5656 */5757 struct list_head client_link;5858#endif5959- /** @props: BO user controlled properties */6060- struct {6161- /** @preferred_mem: preferred memory class for this BO */6262- s16 preferred_mem_class;6363- /** @prefered_gt: preferred GT for this BO */6464- s16 preferred_gt;6565- /** @preferred_mem_type: preferred memory type */6666- s32 preferred_mem_type;6767- /**6868- * @cpu_atomic: the CPU expects to do atomics operations to6969- * this BO7070- */7171- bool cpu_atomic;7272- /**7373- * @device_atomic: the device expects to do atomics operations7474- * to this BO7575- */7676- bool device_atomic;7777- } props;7859 /** @freed: List node for delayed put. */7960 struct llist_node freed;8061 /** @created: Whether the bo has passed initial creation */
···132132 return -EINVAL;133133134134 eci = &resp.eci;135135- if (eci->gt_id > XE_MAX_GT_PER_TILE)135135+ if (eci->gt_id >= XE_MAX_GT_PER_TILE)136136 return -EINVAL;137137138138 gt = xe_device_get_gt(xe, eci->gt_id);
+4-3
drivers/i2c/busses/i2c-i801.c
···536536537537 if (read_write == I2C_SMBUS_READ ||538538 command == I2C_SMBUS_BLOCK_PROC_CALL) {539539- status = i801_get_block_len(priv);540540- if (status < 0)539539+ len = i801_get_block_len(priv);540540+ if (len < 0) {541541+ status = len;541542 goto out;543543+ }542544543543- len = status;544545 data->block[0] = len;545546 inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */546547 for (i = 0; i < len; i++)
+25-13
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
···11391139 * requires a breaking update, zero the V bit, write all qwords11401140 * but 0, then set qword 011411141 */11421142- unused_update.data[0] = entry->data[0] & (~STRTAB_STE_0_V);11421142+ unused_update.data[0] = entry->data[0] &11431143+ cpu_to_le64(~STRTAB_STE_0_V);11431144 entry_set(smmu, sid, entry, &unused_update, 0, 1);11441145 entry_set(smmu, sid, entry, target, 1, num_entry_qwords - 1);11451146 entry_set(smmu, sid, entry, target, 0, 1);···14541453 FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT));14551454}1456145514571457-static void arm_smmu_make_bypass_ste(struct arm_smmu_ste *target)14561456+static void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu,14571457+ struct arm_smmu_ste *target)14581458{14591459 memset(target, 0, sizeof(*target));14601460 target->data[0] = cpu_to_le64(14611461 STRTAB_STE_0_V |14621462 FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_BYPASS));14631463- target->data[1] = cpu_to_le64(14641464- FIELD_PREP(STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING));14631463+14641464+ if (smmu->features & ARM_SMMU_FEAT_ATTR_TYPES_OVR)14651465+ target->data[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,14661466+ STRTAB_STE_1_SHCFG_INCOMING));14651467}1466146814671469static void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,···15271523 typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr =15281524 &pgtbl_cfg->arm_lpae_s2_cfg.vtcr;15291525 u64 vtcr_val;15261526+ struct arm_smmu_device *smmu = master->smmu;1530152715311528 memset(target, 0, sizeof(*target));15321529 target->data[0] = cpu_to_le64(···1536153115371532 target->data[1] = cpu_to_le64(15381533 FIELD_PREP(STRTAB_STE_1_EATS,15391539- master->ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0) |15401540- FIELD_PREP(STRTAB_STE_1_SHCFG,15411541- STRTAB_STE_1_SHCFG_INCOMING));15341534+ master->ats_enabled ? 
STRTAB_STE_1_EATS_TRANS : 0));15351535+15361536+ if (smmu->features & ARM_SMMU_FEAT_ATTR_TYPES_OVR)15371537+ target->data[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,15381538+ STRTAB_STE_1_SHCFG_INCOMING));1542153915431540 vtcr_val = FIELD_PREP(STRTAB_STE_2_VTCR_S2T0SZ, vtcr->tsz) |15441541 FIELD_PREP(STRTAB_STE_2_VTCR_S2SL0, vtcr->sl) |···15671560 * This can safely directly manipulate the STE memory without a sync sequence15681561 * because the STE table has not been installed in the SMMU yet.15691562 */15701570-static void arm_smmu_init_initial_stes(struct arm_smmu_ste *strtab,15631563+static void arm_smmu_init_initial_stes(struct arm_smmu_device *smmu,15641564+ struct arm_smmu_ste *strtab,15711565 unsigned int nent)15721566{15731567 unsigned int i;···15771569 if (disable_bypass)15781570 arm_smmu_make_abort_ste(strtab);15791571 else15801580- arm_smmu_make_bypass_ste(strtab);15721572+ arm_smmu_make_bypass_ste(smmu, strtab);15811573 strtab++;15821574 }15831575}···16051597 return -ENOMEM;16061598 }1607159916081608- arm_smmu_init_initial_stes(desc->l2ptr, 1 << STRTAB_SPLIT);16001600+ arm_smmu_init_initial_stes(smmu, desc->l2ptr, 1 << STRTAB_SPLIT);16091601 arm_smmu_write_strtab_l1_desc(strtab, desc);16101602 return 0;16111603}···26452637 struct device *dev)26462638{26472639 struct arm_smmu_ste ste;26402640+ struct arm_smmu_master *master = dev_iommu_priv_get(dev);2648264126492649- arm_smmu_make_bypass_ste(&ste);26422642+ arm_smmu_make_bypass_ste(master->smmu, &ste);26502643 return arm_smmu_attach_dev_ste(dev, &ste);26512644}26522645···32733264 reg |= FIELD_PREP(STRTAB_BASE_CFG_LOG2SIZE, smmu->sid_bits);32743265 cfg->strtab_base_cfg = reg;3275326632763276- arm_smmu_init_initial_stes(strtab, cfg->num_l1_ents);32673267+ arm_smmu_init_initial_stes(smmu, strtab, cfg->num_l1_ents);32773268 return 0;32783269}32793270···37863777 return -ENXIO;37873778 }3788377937803780+ if (reg & IDR1_ATTR_TYPES_OVR)37813781+ smmu->features |= 
ARM_SMMU_FEAT_ATTR_TYPES_OVR;37823782+37893783 /* Queue sizes, capped to ensure natural alignment */37903784 smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,37913785 FIELD_GET(IDR1_CMDQS, reg));···40043992 * STE table is not programmed to HW, see40053993 * arm_smmu_initial_bypass_stes()40063994 */40074007- arm_smmu_make_bypass_ste(39953995+ arm_smmu_make_bypass_ste(smmu,40083996 arm_smmu_get_step_for_sid(smmu, rmr->sids[i]));40093997 }40103998 }
···30913091{30923092 struct ice_vsi_cfg_params params = {};30933093 struct ice_coalesce_stored *coalesce;30943094- int prev_num_q_vectors = 0;30943094+ int prev_num_q_vectors;30953095 struct ice_pf *pf;30963096 int ret;30973097···31053105 if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))31063106 return -EINVAL;3107310731083108- coalesce = kcalloc(vsi->num_q_vectors,31093109- sizeof(struct ice_coalesce_stored), GFP_KERNEL);31103110- if (!coalesce)31113111- return -ENOMEM;31123112-31133113- prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);31143114-31153108 ret = ice_vsi_realloc_stat_arrays(vsi);31163109 if (ret)31173110 goto err_vsi_cfg;···31133120 ret = ice_vsi_cfg_def(vsi, ¶ms);31143121 if (ret)31153122 goto err_vsi_cfg;31233123+31243124+ coalesce = kcalloc(vsi->num_q_vectors,31253125+ sizeof(struct ice_coalesce_stored), GFP_KERNEL);31263126+ if (!coalesce)31273127+ return -ENOMEM;31283128+31293129+ prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);3116313031173131 ret = ice_vsi_cfg_tc_lan(pf, vsi);31183132 if (ret) {···3139313931403140err_vsi_cfg_tc_lan:31413141 ice_vsi_decfg(vsi);31423142-err_vsi_cfg:31433142 kfree(coalesce);31433143+err_vsi_cfg:31443144 return ret;31453145}31463146
+14-10
drivers/net/ethernet/intel/ice/ice_switch.c
···20252025 * ice_aq_map_recipe_to_profile - Map recipe to packet profile20262026 * @hw: pointer to the HW struct20272027 * @profile_id: package profile ID to associate the recipe with20282028- * @r_bitmap: Recipe bitmap filled in and need to be returned as response20282028+ * @r_assoc: Recipe bitmap filled in and need to be returned as response20292029 * @cd: pointer to command details structure or NULL20302030 * Recipe to profile association (0x0291)20312031 */20322032int20332033-ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,20332033+ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 r_assoc,20342034 struct ice_sq_cd *cd)20352035{20362036 struct ice_aqc_recipe_to_profile *cmd;···20422042 /* Set the recipe ID bit in the bitmask to let the device know which20432043 * profile we are associating the recipe to20442044 */20452045- memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc));20452045+ cmd->recipe_assoc = cpu_to_le64(r_assoc);2046204620472047 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);20482048}···20512051 * ice_aq_get_recipe_to_profile - Map recipe to packet profile20522052 * @hw: pointer to the HW struct20532053 * @profile_id: package profile ID to associate the recipe with20542054- * @r_bitmap: Recipe bitmap filled in and need to be returned as response20542054+ * @r_assoc: Recipe bitmap filled in and need to be returned as response20552055 * @cd: pointer to command details structure or NULL20562056 * Associate profile ID with given recipe (0x0293)20572057 */20582058int20592059-ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,20592059+ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc,20602060 struct ice_sq_cd *cd)20612061{20622062 struct ice_aqc_recipe_to_profile *cmd;···2069206920702070 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);20712071 if (!status)20722072- memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));20722072+ 
*r_assoc = le64_to_cpu(cmd->recipe_assoc);2073207320742074 return status;20752075}···21082108static void ice_get_recp_to_prof_map(struct ice_hw *hw)21092109{21102110 DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);21112111+ u64 recp_assoc;21112112 u16 i;2112211321132114 for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {···2116211521172116 bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);21182117 bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);21192119- if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))21182118+ if (ice_aq_get_recipe_to_profile(hw, i, &recp_assoc, NULL))21202119 continue;21202120+ bitmap_from_arr64(r_bitmap, &recp_assoc, ICE_MAX_NUM_RECIPES);21212121 bitmap_copy(profile_to_recipe[i], r_bitmap,21222122 ICE_MAX_NUM_RECIPES);21232123 for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)···53925390 */53935391 list_for_each_entry(fvit, &rm->fv_list, list_entry) {53945392 DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);53935393+ u64 recp_assoc;53955394 u16 j;5396539553975396 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,53985398- (u8 *)r_bitmap, NULL);53975397+ &recp_assoc, NULL);53995398 if (status)54005399 goto err_unroll;5401540054015401+ bitmap_from_arr64(r_bitmap, &recp_assoc, ICE_MAX_NUM_RECIPES);54025402 bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,54035403 ICE_MAX_NUM_RECIPES);54045404 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);54055405 if (status)54065406 goto err_unroll;5407540754085408+ bitmap_to_arr64(&recp_assoc, r_bitmap, ICE_MAX_NUM_RECIPES);54085409 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,54095409- (u8 *)r_bitmap,54105410- NULL);54105410+ recp_assoc, NULL);54115411 ice_release_change_lock(hw);5412541254135413 if (status)
···1642164216431643 if (unlikely(test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) &&16441644 skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {16451645- /* FIXME: add support for retrieving timestamps from16461646- * the other timer registers before skipping the16471647- * timestamping request.16481648- */16491645 unsigned long flags;16501646 u32 tstamp_flags;16511647
···5050 * the macros available to do this only define GCC 8.5151 */5252__diag_push();5353-__diag_ignore(GCC, 8, "-Woverride-init",5353+__diag_ignore_all("-Woverride-init",5454 "logic to initialize all and then override some is OK");5555static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {5656 SH_ETH_OFFSET_DEFAULTS,
···797797798798static int at8031_probe(struct phy_device *phydev)799799{800800- struct at803x_priv *priv = phydev->priv;800800+ struct at803x_priv *priv;801801 int mode_cfg;802802 int ccr;803803 int ret;···805805 ret = at803x_probe(phydev);806806 if (ret)807807 return ret;808808+809809+ priv = phydev->priv;808810809811 /* Only supported on AR8031/AR8033, the AR8030/AR8035 use strapping810812 * options.
···12601260 if (IS_ERR_OR_NULL(vif))12611261 return 1;1262126212631263- if (ieee80211_vif_is_mld(vif) && vif->cfg.assoc) {12631263+ if (hweight16(vif->active_links) > 1) {12641264 /*12651265- * Select the 'best' link. May need to revisit, it seems12661266- * better to not optimize for throughput but rather range,12671267- * reliability and power here - and select 2.4 GHz ...12651265+ * Select the 'best' link.12661266+ * May need to revisit, it seems better to not optimize12671267+ * for throughput but rather range, reliability and12681268+ * power here - and select 2.4 GHz ...12681269 */12691269- primary_link =12701270- iwl_mvm_mld_get_primary_link(mvm, vif,12711271- vif->active_links);12701270+ primary_link = iwl_mvm_mld_get_primary_link(mvm, vif,12711271+ vif->active_links);1272127212731273 if (WARN_ONCE(primary_link < 0, "no primary link in 0x%x\n",12741274 vif->active_links))···12771277 ret = ieee80211_set_active_links(vif, BIT(primary_link));12781278 if (ret)12791279 return ret;12801280+ } else if (vif->active_links) {12811281+ primary_link = __ffs(vif->active_links);12801282 } else {12811283 primary_link = 0;12821284 }
···4646 return ret;4747}48484949+int iwl_mvm_set_link_mapping(struct iwl_mvm *mvm, struct ieee80211_vif *vif,5050+ struct ieee80211_bss_conf *link_conf)5151+{5252+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);5353+ struct iwl_mvm_vif_link_info *link_info =5454+ mvmvif->link[link_conf->link_id];5555+5656+ if (link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID) {5757+ link_info->fw_link_id = iwl_mvm_get_free_fw_link_id(mvm,5858+ mvmvif);5959+ if (link_info->fw_link_id >=6060+ ARRAY_SIZE(mvm->link_id_to_link_conf))6161+ return -EINVAL;6262+6363+ rcu_assign_pointer(mvm->link_id_to_link_conf[link_info->fw_link_id],6464+ link_conf);6565+ }6666+6767+ return 0;6868+}6969+4970int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,5071 struct ieee80211_bss_conf *link_conf)5172{···7655 struct iwl_link_config_cmd cmd = {};7756 unsigned int cmd_id = WIDE_ID(MAC_CONF_GROUP, LINK_CONFIG_CMD);7857 u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1);5858+ int ret;79598060 if (WARN_ON_ONCE(!link_info))8161 return -EINVAL;82628383- if (link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID) {8484- link_info->fw_link_id = iwl_mvm_get_free_fw_link_id(mvm,8585- mvmvif);8686- if (link_info->fw_link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf))8787- return -EINVAL;8888-8989- rcu_assign_pointer(mvm->link_id_to_link_conf[link_info->fw_link_id],9090- link_conf);9191- }6363+ ret = iwl_mvm_set_link_mapping(mvm, vif, link_conf);6464+ if (ret)6565+ return ret;92669367 /* Update SF - Disable if needed. 
if this fails, SF might still be on9468 * while many macs are bound, which is forbidden - so fail the binding.···264248 return ret;265249}266250251251+int iwl_mvm_unset_link_mapping(struct iwl_mvm *mvm, struct ieee80211_vif *vif,252252+ struct ieee80211_bss_conf *link_conf)253253+{254254+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);255255+ struct iwl_mvm_vif_link_info *link_info =256256+ mvmvif->link[link_conf->link_id];257257+258258+ /* mac80211 thought we have the link, but it was never configured */259259+ if (WARN_ON(!link_info ||260260+ link_info->fw_link_id >=261261+ ARRAY_SIZE(mvm->link_id_to_link_conf)))262262+ return -EINVAL;263263+264264+ RCU_INIT_POINTER(mvm->link_id_to_link_conf[link_info->fw_link_id],265265+ NULL);266266+ return 0;267267+}268268+267269int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,268270 struct ieee80211_bss_conf *link_conf)269271{···291257 struct iwl_link_config_cmd cmd = {};292258 int ret;293259294294- /* mac80211 thought we have the link, but it was never configured */295295- if (WARN_ON(!link_info ||296296- link_info->fw_link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf)))260260+ ret = iwl_mvm_unset_link_mapping(mvm, vif, link_conf);261261+ if (ret)297262 return 0;298263299299- RCU_INIT_POINTER(mvm->link_id_to_link_conf[link_info->fw_link_id],300300- NULL);301264 cmd.link_id = cpu_to_le32(link_info->fw_link_id);302265 iwl_mvm_release_fw_link_id(mvm, link_info->fw_link_id);303266 link_info->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID;
+8-1
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
···360360	if (mvm->mld_api_is_used && mvm->nvm_data->sku_cap_11be_enable &&361361	    !iwlwifi_mod_params.disable_11ax &&362362	    !iwlwifi_mod_params.disable_11be)363363		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;364364365365	/* With MLD FW API, it tracks timing by itself,366366	 * no need for any timing from the host···15771577	mvmvif->mvm = mvm;1578157815791579	/* the first link always points to the default one */15801580+	mvmvif->deflink.fw_link_id = IWL_MVM_FW_LINK_ID_INVALID;15811581+	mvmvif->deflink.active = 0;15801582	mvmvif->link[0] = &mvmvif->deflink;15831583+15841584+	ret = iwl_mvm_set_link_mapping(mvm, vif, &vif->bss_conf);15851585+	if (ret)15861586+		goto out;1581158715821588	/*15831589	 * Not much to do here. The stack will not allow interface···17891783		mvm->p2p_device_vif = NULL;17901784	}1791178517861786+	iwl_mvm_unset_link_mapping(mvm, vif, &vif->bss_conf);17921787	iwl_mvm_mac_ctxt_remove(mvm, vif);1793178817941789	RCU_INIT_POINTER(mvm->vif_id_to_mac[mvmvif->id], NULL);
+6-1
drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
···855855856856int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id)857857{858858- int ret = iwl_mvm_mld_rm_sta_from_fw(mvm, sta_id);858858+ int ret;859859860860 lockdep_assert_held(&mvm->mutex);861861+862862+ if (WARN_ON(sta_id == IWL_MVM_INVALID_STA))863863+ return 0;864864+865865+ ret = iwl_mvm_mld_rm_sta_from_fw(mvm, sta_id);861866862867 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);863868 RCU_INIT_POINTER(mvm->fw_id_to_link_sta[sta_id], NULL);
···284284 return PTR_ERR(imgchip->sys_clk);285285 }286286287287- imgchip->pwm_clk = devm_clk_get(&pdev->dev, "imgchip");287287+ imgchip->pwm_clk = devm_clk_get(&pdev->dev, "pwm");288288 if (IS_ERR(imgchip->pwm_clk)) {289289- dev_err(&pdev->dev, "failed to get imgchip clock\n");289289+ dev_err(&pdev->dev, "failed to get pwm clock\n");290290 return PTR_ERR(imgchip->pwm_clk);291291 }292292
+39-18
drivers/ras/amd/fmpm.c
···150150/* Total length of record including headers and list of descriptor entries. */151151static size_t max_rec_len;152152153153+#define FMPM_MAX_REC_LEN (sizeof(struct fru_rec) + (sizeof(struct cper_fru_poison_desc) * 255))154154+153155/* Total number of SPA entries across all FRUs. */154156static unsigned int spa_nr_entries;155157···477475 struct cper_section_descriptor *sec_desc = &rec->sec_desc;478476 struct cper_record_header *hdr = &rec->hdr;479477478478+ /*479479+ * This is a saved record created with fewer max_nr_entries.480480+ * Update the record lengths and keep everything else as-is.481481+ */482482+ if (hdr->record_length && hdr->record_length < max_rec_len) {483483+ pr_debug("Growing record 0x%016llx from %u to %zu bytes\n",484484+ hdr->record_id, hdr->record_length, max_rec_len);485485+ goto update_lengths;486486+ }487487+480488 memcpy(hdr->signature, CPER_SIG_RECORD, CPER_SIG_SIZE);481489 hdr->revision = CPER_RECORD_REV;482490 hdr->signature_end = CPER_SIG_END;···501489 hdr->error_severity = CPER_SEV_RECOVERABLE;502490503491 hdr->validation_bits = 0;504504- hdr->record_length = max_rec_len;505492 hdr->creator_id = CPER_CREATOR_FMP;506493 hdr->notification_type = CPER_NOTIFY_MCE;507494 hdr->record_id = cper_next_record_id();508495 hdr->flags = CPER_HW_ERROR_FLAGS_PREVERR;509496510497 sec_desc->section_offset = sizeof(struct cper_record_header);511511- sec_desc->section_length = max_rec_len - sizeof(struct cper_record_header);512498 sec_desc->revision = CPER_SEC_REV;513499 sec_desc->validation_bits = 0;514500 sec_desc->flags = CPER_SEC_PRIMARY;515501 sec_desc->section_type = CPER_SECTION_TYPE_FMP;516502 sec_desc->section_severity = CPER_SEV_RECOVERABLE;503503+504504+update_lengths:505505+ hdr->record_length = max_rec_len;506506+ sec_desc->section_length = max_rec_len - sizeof(struct cper_record_header);517507}518508519509static int save_new_records(void)···526512 int ret = 0;527513528514 for_each_fru(i, rec) {529529- if 
(rec->hdr.record_length)515515+ /* No need to update saved records that match the current record size. */516516+ if (rec->hdr.record_length == max_rec_len)530517 continue;518518+519519+ if (!rec->hdr.record_length)520520+ set_bit(i, new_records);531521532522 set_rec_fields(rec);533523534524 ret = update_record_on_storage(rec);535525 if (ret)536526 goto out_clear;537537-538538- set_bit(i, new_records);539527 }540528541529 return ret;···657641 int ret, pos;658642 ssize_t len;659643660660- /*661661- * Assume saved records match current max size.662662- *663663- * However, this may not be true depending on module parameters.664664- */665665- old = kmalloc(max_rec_len, GFP_KERNEL);644644+ old = kmalloc(FMPM_MAX_REC_LEN, GFP_KERNEL);666645 if (!old) {667646 ret = -ENOMEM;668647 goto out;···674663 * Make sure to clear temporary buffer between reads to avoid675664 * leftover data from records of various sizes.676665 */677677- memset(old, 0, max_rec_len);666666+ memset(old, 0, FMPM_MAX_REC_LEN);678667679679- len = erst_read_record(record_id, &old->hdr, max_rec_len,668668+ len = erst_read_record(record_id, &old->hdr, FMPM_MAX_REC_LEN,680669 sizeof(struct fru_rec), &CPER_CREATOR_FMP);681670 if (len < 0)682671 continue;683672684684- if (len > max_rec_len) {685685- pr_debug("Found record larger than max_rec_len\n");673673+ new = get_valid_record(old);674674+ if (!new) {675675+ erst_clear(record_id);686676 continue;687677 }688678689689- new = get_valid_record(old);690690- if (!new)691691- erst_clear(record_id);679679+ if (len > max_rec_len) {680680+ unsigned int saved_nr_entries;681681+682682+ saved_nr_entries = len - sizeof(struct fru_rec);683683+ saved_nr_entries /= sizeof(struct cper_fru_poison_desc);684684+685685+ pr_warn("Saved record found with %u entries.\n", saved_nr_entries);686686+ pr_warn("Please increase max_nr_entries to %u.\n", saved_nr_entries);687687+688688+ ret = -EINVAL;689689+ goto out_end;690690+ }692691693692 /* Restore the record */694693 memcpy(new, old, 
len);
···353353354354 if (shost->shost_state == SHOST_CREATED) {355355 /*356356- * Free the shost_dev device name here if scsi_host_alloc()357357- * and scsi_host_put() have been called but neither356356+ * Free the shost_dev device name and remove the proc host dir357357+ * here if scsi_host_{alloc,put}() have been called but neither358358 * scsi_host_add() nor scsi_remove_host() has been called.359359 * This avoids that the memory allocated for the shost_dev360360- * name is leaked.360360+ * name as well as the proc dir structure are leaked.361361 */362362+ scsi_proc_hostdir_rm(shost->hostt);362363 kfree(dev_name(&shost->shost_dev));363364 }364365
+34-17
drivers/scsi/libsas/sas_expander.c
···1621162116221622/* ---------- Domain revalidation ---------- */1623162316241624+static void sas_get_sas_addr_and_dev_type(struct smp_disc_resp *disc_resp,16251625+ u8 *sas_addr,16261626+ enum sas_device_type *type)16271627+{16281628+ memcpy(sas_addr, disc_resp->disc.attached_sas_addr, SAS_ADDR_SIZE);16291629+ *type = to_dev_type(&disc_resp->disc);16301630+ if (*type == SAS_PHY_UNUSED)16311631+ memset(sas_addr, 0, SAS_ADDR_SIZE);16321632+}16331633+16241634static int sas_get_phy_discover(struct domain_device *dev,16251635 int phy_id, struct smp_disc_resp *disc_resp)16261636{···16841674 return -ENOMEM;1685167516861676 res = sas_get_phy_discover(dev, phy_id, disc_resp);16871687- if (res == 0) {16881688- memcpy(sas_addr, disc_resp->disc.attached_sas_addr,16891689- SAS_ADDR_SIZE);16901690- *type = to_dev_type(&disc_resp->disc);16911691- if (*type == 0)16921692- memset(sas_addr, 0, SAS_ADDR_SIZE);16931693- }16771677+ if (res == 0)16781678+ sas_get_sas_addr_and_dev_type(disc_resp, sas_addr, type);16941679 kfree(disc_resp);16951680 return res;16961681}···19451940 struct expander_device *ex = &dev->ex_dev;19461941 struct ex_phy *phy = &ex->ex_phy[phy_id];19471942 enum sas_device_type type = SAS_PHY_UNUSED;19431943+ struct smp_disc_resp *disc_resp;19481944 u8 sas_addr[SAS_ADDR_SIZE];19491945 char msg[80] = "";19501946 int res;···19571951 SAS_ADDR(dev->sas_addr), phy_id, msg);1958195219591953 memset(sas_addr, 0, SAS_ADDR_SIZE);19601960- res = sas_get_phy_attached_dev(dev, phy_id, sas_addr, &type);19541954+ disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);19551955+ if (!disc_resp)19561956+ return -ENOMEM;19571957+19581958+ res = sas_get_phy_discover(dev, phy_id, disc_resp);19611959 switch (res) {19621960 case SMP_RESP_NO_PHY:19631961 phy->phy_state = PHY_NOT_PRESENT;19641962 sas_unregister_devs_sas_addr(dev, phy_id, last);19651965- return res;19631963+ goto out_free_resp;19661964 case SMP_RESP_PHY_VACANT:19671965 phy->phy_state = PHY_VACANT;19681966 
sas_unregister_devs_sas_addr(dev, phy_id, last);19691969- return res;19671967+ goto out_free_resp;19701968 case SMP_RESP_FUNC_ACC:19711969 break;19721970 case -ECOMM:19731971 break;19741972 default:19751975- return res;19731973+ goto out_free_resp;19761974 }19751975+19761976+ if (res == 0)19771977+ sas_get_sas_addr_and_dev_type(disc_resp, sas_addr, &type);1977197819781979 if ((SAS_ADDR(sas_addr) == 0) || (res == -ECOMM)) {19791980 phy->phy_state = PHY_EMPTY;19801981 sas_unregister_devs_sas_addr(dev, phy_id, last);19811982 /*19821982- * Even though the PHY is empty, for convenience we discover19831983- * the PHY to update the PHY info, like negotiated linkrate.19831983+ * Even though the PHY is empty, for convenience we update19841984+ * the PHY info, like negotiated linkrate.19841985 */19851985- sas_ex_phy_discover(dev, phy_id);19861986- return res;19861986+ if (res == 0)19871987+ sas_set_ex_phy(dev, phy_id, disc_resp);19881988+ goto out_free_resp;19871989 } else if (SAS_ADDR(sas_addr) == SAS_ADDR(phy->attached_sas_addr) &&19881990 dev_type_flutter(type, phy->attached_dev_type)) {19891991 struct domain_device *ata_dev = sas_ex_to_ata(dev, phy_id);···20031989 action = ", needs recovery";20041990 pr_debug("ex %016llx phy%02d broadcast flutter%s\n",20051991 SAS_ADDR(dev->sas_addr), phy_id, action);20062006- return res;19921992+ goto out_free_resp;20071993 }2008199420091995 /* we always have to delete the old device when we went here */···20121998 SAS_ADDR(phy->attached_sas_addr));20131999 sas_unregister_devs_sas_addr(dev, phy_id, last);2014200020152015- return sas_discover_new(dev, phy_id);20012001+ res = sas_discover_new(dev, phy_id);20022002+out_free_resp:20032003+ kfree(disc_resp);20042004+ return res;20162005}2017200620182007/**
+1-1
drivers/scsi/lpfc/lpfc.h
···13331333 struct timer_list fabric_block_timer;13341334 unsigned long bit_flags;13351335 atomic_t num_rsrc_err;13361336- atomic_t num_cmd_success;13371336 unsigned long last_rsrc_error_time;13381337 unsigned long last_ramp_down_time;13391338#ifdef CONFIG_SCSI_LPFC_DEBUG_FS···14371438 struct timer_list inactive_vmid_poll;1438143914391440 /* RAS Support */14411441+ spinlock_t ras_fwlog_lock; /* do not take while holding another lock */14401442 struct lpfc_ras_fwlog ras_fwlog;1441144314421444 uint32_t iocb_cnt;
+2-2
drivers/scsi/lpfc/lpfc_attr.c
···58655865 if (phba->cfg_ras_fwlog_func != PCI_FUNC(phba->pcidev->devfn))58665866 return -EINVAL;5867586758685868- spin_lock_irq(&phba->hbalock);58685868+ spin_lock_irq(&phba->ras_fwlog_lock);58695869 state = phba->ras_fwlog.state;58705870- spin_unlock_irq(&phba->hbalock);58705870+ spin_unlock_irq(&phba->ras_fwlog_lock);5871587158725872 if (state == REG_INPROGRESS) {58735873 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "6147 RAS Logging "
+20-20
drivers/scsi/lpfc/lpfc_bsg.c
···25132513 return -ENOMEM;25142514 }2515251525162516- dmabuff = (struct lpfc_dmabuf *)mbox->ctx_buf;25162516+ dmabuff = mbox->ctx_buf;25172517 mbox->ctx_buf = NULL;25182518 mbox->ctx_ndlp = NULL;25192519 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);···31693169 }3170317031713171 cmdwqe = &cmdiocbq->wqe;31723172- memset(cmdwqe, 0, sizeof(union lpfc_wqe));31723172+ memset(cmdwqe, 0, sizeof(*cmdwqe));31733173 if (phba->sli_rev < LPFC_SLI_REV4) {31743174 rspwqe = &rspiocbq->wqe;31753175- memset(rspwqe, 0, sizeof(union lpfc_wqe));31753175+ memset(rspwqe, 0, sizeof(*rspwqe));31763176 }3177317731783178 INIT_LIST_HEAD(&head);···33763376 unsigned long flags;33773377 uint8_t *pmb, *pmb_buf;3378337833793379- dd_data = pmboxq->ctx_ndlp;33793379+ dd_data = pmboxq->ctx_u.dd_data;3380338033813381 /*33823382 * The outgoing buffer is readily referred from the dma buffer,···35533553 struct lpfc_sli_config_mbox *sli_cfg_mbx;35543554 uint8_t *pmbx;3555355535563556- dd_data = pmboxq->ctx_buf;35563556+ dd_data = pmboxq->ctx_u.dd_data;3557355735583558 /* Determine if job has been aborted */35593559 spin_lock_irqsave(&phba->ct_ev_lock, flags);···39403940 pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;3941394139423942 /* context fields to callback function */39433943- pmboxq->ctx_buf = dd_data;39433943+ pmboxq->ctx_u.dd_data = dd_data;39443944 dd_data->type = TYPE_MBOX;39453945 dd_data->set_job = job;39463946 dd_data->context_un.mbox.pmboxq = pmboxq;···41124112 pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;4113411341144114 /* context fields to callback function */41154115- pmboxq->ctx_buf = dd_data;41154115+ pmboxq->ctx_u.dd_data = dd_data;41164116 dd_data->type = TYPE_MBOX;41174117 dd_data->set_job = job;41184118 dd_data->context_un.mbox.pmboxq = pmboxq;···44604460 pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;4461446144624462 /* context fields to callback function */44634463- pmboxq->ctx_buf = dd_data;44634463+ pmboxq->ctx_u.dd_data = 
dd_data;44644464 dd_data->type = TYPE_MBOX;44654465 dd_data->set_job = job;44664466 dd_data->context_un.mbox.pmboxq = pmboxq;···47474747 if (mbox_req->inExtWLen || mbox_req->outExtWLen) {47484748 from = pmbx;47494749 ext = from + sizeof(MAILBOX_t);47504750- pmboxq->ctx_buf = ext;47504750+ pmboxq->ext_buf = ext;47514751 pmboxq->in_ext_byte_len =47524752 mbox_req->inExtWLen * sizeof(uint32_t);47534753 pmboxq->out_ext_byte_len =···48754875 pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;4876487648774877 /* setup context field to pass wait_queue pointer to wake function */48784878- pmboxq->ctx_ndlp = dd_data;48784878+ pmboxq->ctx_u.dd_data = dd_data;48794879 dd_data->type = TYPE_MBOX;48804880 dd_data->set_job = job;48814881 dd_data->context_un.mbox.pmboxq = pmboxq;···50705070 bsg_reply->reply_data.vendor_reply.vendor_rsp;5071507150725072 /* Current logging state */50735073- spin_lock_irq(&phba->hbalock);50735073+ spin_lock_irq(&phba->ras_fwlog_lock);50745074 if (ras_fwlog->state == ACTIVE)50755075 ras_reply->state = LPFC_RASLOG_STATE_RUNNING;50765076 else50775077 ras_reply->state = LPFC_RASLOG_STATE_STOPPED;50785078- spin_unlock_irq(&phba->hbalock);50785078+ spin_unlock_irq(&phba->ras_fwlog_lock);5079507950805080 ras_reply->log_level = phba->ras_fwlog.fw_loglevel;50815081 ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize;···5132513251335133 if (action == LPFC_RASACTION_STOP_LOGGING) {51345134 /* Check if already disabled */51355135- spin_lock_irq(&phba->hbalock);51355135+ spin_lock_irq(&phba->ras_fwlog_lock);51365136 if (ras_fwlog->state != ACTIVE) {51375137- spin_unlock_irq(&phba->hbalock);51375137+ spin_unlock_irq(&phba->ras_fwlog_lock);51385138 rc = -ESRCH;51395139 goto ras_job_error;51405140 }51415141- spin_unlock_irq(&phba->hbalock);51415141+ spin_unlock_irq(&phba->ras_fwlog_lock);5142514251435143 /* Disable logging */51445144 lpfc_ras_stop_fwlog(phba);···51495149 * FW-logging with new log-level. 
Return status51505150 * "Logging already Running" to caller.51515151 **/51525152- spin_lock_irq(&phba->hbalock);51525152+ spin_lock_irq(&phba->ras_fwlog_lock);51535153 if (ras_fwlog->state != INACTIVE)51545154 action_status = -EINPROGRESS;51555155- spin_unlock_irq(&phba->hbalock);51555155+ spin_unlock_irq(&phba->ras_fwlog_lock);5156515651575157 /* Enable logging */51585158 rc = lpfc_sli4_ras_fwlog_init(phba, log_level,···52685268 goto ras_job_error;5269526952705270 /* Logging to be stopped before reading */52715271- spin_lock_irq(&phba->hbalock);52715271+ spin_lock_irq(&phba->ras_fwlog_lock);52725272 if (ras_fwlog->state == ACTIVE) {52735273- spin_unlock_irq(&phba->hbalock);52735273+ spin_unlock_irq(&phba->ras_fwlog_lock);52745274 rc = -EINPROGRESS;52755275 goto ras_job_error;52765276 }52775277- spin_unlock_irq(&phba->hbalock);52775277+ spin_unlock_irq(&phba->ras_fwlog_lock);5278527852795279 if (job->request_len <52805280 sizeof(struct fc_bsg_request) +
+6-6
drivers/scsi/lpfc/lpfc_debugfs.c
···2194219421952195 memset(buffer, 0, size);2196219621972197- spin_lock_irq(&phba->hbalock);21972197+ spin_lock_irq(&phba->ras_fwlog_lock);21982198 if (phba->ras_fwlog.state != ACTIVE) {21992199- spin_unlock_irq(&phba->hbalock);21992199+ spin_unlock_irq(&phba->ras_fwlog_lock);22002200 return -EINVAL;22012201 }22022202- spin_unlock_irq(&phba->hbalock);22022202+ spin_unlock_irq(&phba->ras_fwlog_lock);2203220322042204 list_for_each_entry_safe(dmabuf, next,22052205 &phba->ras_fwlog.fwlog_buff_list, list) {···22502250 int size;22512251 int rc = -ENOMEM;2252225222532253- spin_lock_irq(&phba->hbalock);22532253+ spin_lock_irq(&phba->ras_fwlog_lock);22542254 if (phba->ras_fwlog.state != ACTIVE) {22552255- spin_unlock_irq(&phba->hbalock);22552255+ spin_unlock_irq(&phba->ras_fwlog_lock);22562256 rc = -EINVAL;22572257 goto out;22582258 }22592259- spin_unlock_irq(&phba->hbalock);22592259+ spin_unlock_irq(&phba->ras_fwlog_lock);2260226022612261 if (check_mul_overflow(LPFC_RAS_MIN_BUFF_POST_SIZE,22622262 phba->cfg_ras_fwlog_buffsize, &size))
···26162616 /* No concern about the role change on the nvme remoteport.26172617 * The transport will update it.26182618 */26192619- spin_lock_irq(&vport->phba->hbalock);26192619+ spin_lock_irq(&ndlp->lock);26202620 ndlp->fc4_xpt_flags |= NVME_XPT_UNREG_WAIT;26212621- spin_unlock_irq(&vport->phba->hbalock);26212621+ spin_unlock_irq(&ndlp->lock);2622262226232623 /* Don't let the host nvme transport keep sending keep-alives26242624 * on this remoteport. Vport is unloading, no recovery. The
···167167 struct Scsi_Host *shost;168168 struct scsi_device *sdev;169169 unsigned long new_queue_depth;170170- unsigned long num_rsrc_err, num_cmd_success;170170+ unsigned long num_rsrc_err;171171 int i;172172173173 num_rsrc_err = atomic_read(&phba->num_rsrc_err);174174- num_cmd_success = atomic_read(&phba->num_cmd_success);175174176175 /*177176 * The error and success command counters are global per···185186 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {186187 shost = lpfc_shost_from_vport(vports[i]);187188 shost_for_each_device(sdev, shost) {188188- new_queue_depth =189189- sdev->queue_depth * num_rsrc_err /190190- (num_rsrc_err + num_cmd_success);191191- if (!new_queue_depth)192192- new_queue_depth = sdev->queue_depth - 1;189189+ if (num_rsrc_err >= sdev->queue_depth)190190+ new_queue_depth = 1;193191 else194192 new_queue_depth = sdev->queue_depth -195195- new_queue_depth;193193+ num_rsrc_err;196194 scsi_change_queue_depth(sdev, new_queue_depth);197195 }198196 }199197 lpfc_destroy_vport_work_array(phba, vports);200198 atomic_set(&phba->num_rsrc_err, 0);201201- atomic_set(&phba->num_cmd_success, 0);202199}203200204201/**···53315336 }53325337 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);53335338 } else {53345334- if (vport->phba->cfg_enable_bg) {53355335- lpfc_printf_vlog(vport,53365336- KERN_INFO, LOG_SCSI_CMD,53375337- "9038 BLKGRD: rcvd PROT_NORMAL cmd: "53385338- "x%x reftag x%x cnt %u pt %x\n",53395339- cmnd->cmnd[0],53405340- scsi_prot_ref_tag(cmnd),53415341- scsi_logical_block_count(cmnd),53425342- (cmnd->cmnd[1]>>5));53435343- }53445339 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);53455340 }53465341
+49-50
drivers/scsi/lpfc/lpfc_sli.c
···12171217 empty = list_empty(&phba->active_rrq_list);12181218 list_add_tail(&rrq->list, &phba->active_rrq_list);12191219 phba->hba_flag |= HBA_RRQ_ACTIVE;12201220+ spin_unlock_irqrestore(&phba->hbalock, iflags);12201221 if (empty)12211222 lpfc_worker_wake_up(phba);12221222- spin_unlock_irqrestore(&phba->hbalock, iflags);12231223 return 0;12241224out:12251225 spin_unlock_irqrestore(&phba->hbalock, iflags);···28302830 */28312831 pmboxq->mbox_flag |= LPFC_MBX_WAKE;28322832 spin_lock_irqsave(&phba->hbalock, drvr_flag);28332833- pmbox_done = (struct completion *)pmboxq->context3;28332833+ pmbox_done = pmboxq->ctx_u.mbox_wait;28342834 if (pmbox_done)28352835 complete(pmbox_done);28362836 spin_unlock_irqrestore(&phba->hbalock, drvr_flag);···28852885 if (!test_bit(FC_UNLOADING, &phba->pport->load_flag) &&28862886 pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&28872887 !pmb->u.mb.mbxStatus) {28882888- mp = (struct lpfc_dmabuf *)pmb->ctx_buf;28882888+ mp = pmb->ctx_buf;28892889 if (mp) {28902890 pmb->ctx_buf = NULL;28912891 lpfc_mbuf_free(phba, mp->virt, mp->phys);···29142914 }2915291529162916 if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {29172917- ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;29172917+ ndlp = pmb->ctx_ndlp;29182918 lpfc_nlp_put(ndlp);29192919 }2920292029212921 if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {29222922- ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;29222922+ ndlp = pmb->ctx_ndlp;2923292329242924 /* Check to see if there are any deferred events to process */29252925 if (ndlp) {···2952295229532953 /* This nlp_put pairs with lpfc_sli4_resume_rpi */29542954 if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) {29552955- ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;29552955+ ndlp = pmb->ctx_ndlp;29562956 lpfc_nlp_put(ndlp);29572957 }29582958···58195819 goto out_free_mboxq;58205820 }5821582158225822- mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;58225822+ mp = mboxq->ctx_buf;58235823 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);5824582458255825 lpfc_printf_log(phba, 
KERN_INFO, LOG_MBOX | LOG_SLI,···68496849{68506850 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;6851685168526852- spin_lock_irq(&phba->hbalock);68526852+ spin_lock_irq(&phba->ras_fwlog_lock);68536853 ras_fwlog->state = INACTIVE;68546854- spin_unlock_irq(&phba->hbalock);68546854+ spin_unlock_irq(&phba->ras_fwlog_lock);6855685568566856 /* Disable FW logging to host memory */68576857 writel(LPFC_CTL_PDEV_CTL_DDL_RAS,···68946894 ras_fwlog->lwpd.virt = NULL;68956895 }6896689668976897- spin_lock_irq(&phba->hbalock);68976897+ spin_lock_irq(&phba->ras_fwlog_lock);68986898 ras_fwlog->state = INACTIVE;68996899- spin_unlock_irq(&phba->hbalock);68996899+ spin_unlock_irq(&phba->ras_fwlog_lock);69006900}6901690169026902/**···69986998 goto disable_ras;69996999 }7000700070017001- spin_lock_irq(&phba->hbalock);70017001+ spin_lock_irq(&phba->ras_fwlog_lock);70027002 ras_fwlog->state = ACTIVE;70037003- spin_unlock_irq(&phba->hbalock);70037003+ spin_unlock_irq(&phba->ras_fwlog_lock);70047004 mempool_free(pmb, phba->mbox_mem_pool);7005700570067006 return;···70327032 uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;70337033 int rc = 0;7034703470357035- spin_lock_irq(&phba->hbalock);70357035+ spin_lock_irq(&phba->ras_fwlog_lock);70367036 ras_fwlog->state = INACTIVE;70377037- spin_unlock_irq(&phba->hbalock);70377037+ spin_unlock_irq(&phba->ras_fwlog_lock);7038703870397039 fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *70407040 phba->cfg_ras_fwlog_buffsize);···70957095 mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);70967096 mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);7097709770987098- spin_lock_irq(&phba->hbalock);70987098+ spin_lock_irq(&phba->ras_fwlog_lock);70997099 ras_fwlog->state = REG_INPROGRESS;71007100- spin_unlock_irq(&phba->hbalock);71007100+ spin_unlock_irq(&phba->ras_fwlog_lock);71017101 mbox->vport = phba->pport;71027102 mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;71037103···8766876687678767 mboxq->vport = 
vport;87688768 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);87698769- mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;87698769+ mp = mboxq->ctx_buf;87708770 if (rc == MBX_SUCCESS) {87718771 memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));87728772 rc = 0;···95489548 }9549954995509550 /* Copy the mailbox extension data */95519551- if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {95529552- lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,95519551+ if (pmbox->in_ext_byte_len && pmbox->ext_buf) {95529552+ lpfc_sli_pcimem_bcopy(pmbox->ext_buf,95539553 (uint8_t *)phba->mbox_ext,95549554 pmbox->in_ext_byte_len);95559555 }···95629562 = MAILBOX_HBA_EXT_OFFSET;9563956395649564 /* Copy the mailbox extension data */95659565- if (pmbox->in_ext_byte_len && pmbox->ctx_buf)95659565+ if (pmbox->in_ext_byte_len && pmbox->ext_buf)95669566 lpfc_memcpy_to_slim(phba->MBslimaddr +95679567 MAILBOX_HBA_EXT_OFFSET,95689568- pmbox->ctx_buf, pmbox->in_ext_byte_len);95689568+ pmbox->ext_buf, pmbox->in_ext_byte_len);9569956995709570 if (mbx->mbxCommand == MBX_CONFIG_PORT)95719571 /* copy command data into host mbox for cmpl */···96889688 lpfc_sli_pcimem_bcopy(phba->mbox, mbx,96899689 MAILBOX_CMD_SIZE);96909690 /* Copy the mailbox extension data */96919691- if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {96919691+ if (pmbox->out_ext_byte_len && pmbox->ext_buf) {96929692 lpfc_sli_pcimem_bcopy(phba->mbox_ext,96939693- pmbox->ctx_buf,96939693+ pmbox->ext_buf,96949694 pmbox->out_ext_byte_len);96959695 }96969696 } else {···96989698 lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,96999699 MAILBOX_CMD_SIZE);97009700 /* Copy the mailbox extension data */97019701- if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {97019701+ if (pmbox->out_ext_byte_len && pmbox->ext_buf) {97029702 lpfc_memcpy_from_slim(97039703- pmbox->ctx_buf,97039703+ pmbox->ext_buf,97049704 phba->MBslimaddr +97059705 MAILBOX_HBA_EXT_OFFSET,97069706 pmbox->out_ext_byte_len);···1137311373 unsigned long iflags;1137411374 struct lpfc_work_evt *evtp = 
&ndlp->recovery_evt;11375113751137611376+ /* Hold a node reference for outstanding queued work */1137711377+ if (!lpfc_nlp_get(ndlp))1137811378+ return;1137911379+1137611380 spin_lock_irqsave(&phba->hbalock, iflags);1137711381 if (!list_empty(&evtp->evt_listp)) {1137811382 spin_unlock_irqrestore(&phba->hbalock, iflags);1138311383+ lpfc_nlp_put(ndlp);1137911384 return;1138011385 }11381113861138211382- /* Incrementing the reference count until the queued work is done. */1138311383- evtp->evt_arg1 = lpfc_nlp_get(ndlp);1138411384- if (!evtp->evt_arg1) {1138511385- spin_unlock_irqrestore(&phba->hbalock, iflags);1138611386- return;1138711387- }1138711387+ evtp->evt_arg1 = ndlp;1138811388 evtp->evt = LPFC_EVT_RECOVER_PORT;1138911389 list_add_tail(&evtp->evt_listp, &phba->work_list);1139011390 spin_unlock_irqrestore(&phba->hbalock, iflags);···1326213262 /* setup wake call as IOCB callback */1326313263 pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;13264132641326513265- /* setup context3 field to pass wait_queue pointer to wake function */1326513265+ /* setup ctx_u field to pass wait_queue pointer to wake function */1326613266 init_completion(&mbox_done);1326713267- pmboxq->context3 = &mbox_done;1326713267+ pmboxq->ctx_u.mbox_wait = &mbox_done;1326813268 /* now issue the command */1326913269 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);1327013270 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {···1327213272 msecs_to_jiffies(timeout * 1000));13273132731327413274 spin_lock_irqsave(&phba->hbalock, flag);1327513275- pmboxq->context3 = NULL;1327513275+ pmboxq->ctx_u.mbox_wait = NULL;1327613276 /*1327713277 * if LPFC_MBX_WAKE flag is set the mailbox is completed1327813278 * else do not free the resources.···1381313813 lpfc_sli_pcimem_bcopy(mbox, pmbox,1381413814 MAILBOX_CMD_SIZE);1381513815 if (pmb->out_ext_byte_len &&1381613816- pmb->ctx_buf)1381613816+ pmb->ext_buf)1381713817 lpfc_sli_pcimem_bcopy(1381813818 phba->mbox_ext,1381913819- pmb->ctx_buf,1381913819+ 
pmb->ext_buf,1382013820 pmb->out_ext_byte_len);1382113821 }1382213822 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {···1383013830 pmbox->un.varWords[0], 0);13831138311383213832 if (!pmbox->mbxStatus) {1383313833- mp = (struct lpfc_dmabuf *)1383413834- (pmb->ctx_buf);1383513835- ndlp = (struct lpfc_nodelist *)1383613836- pmb->ctx_ndlp;1383313833+ mp = pmb->ctx_buf;1383413834+ ndlp = pmb->ctx_ndlp;13837138351383813836 /* Reg_LOGIN of dflt RPI was1383913837 * successful. new lets get···1433814340 mcqe_status,1433914341 pmbox->un.varWords[0], 0);1434014342 if (mcqe_status == MB_CQE_STATUS_SUCCESS) {1434114341- mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);1434214342- ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;1434314343+ mp = pmb->ctx_buf;1434414344+ ndlp = pmb->ctx_ndlp;14343143451434414346 /* Reg_LOGIN of dflt RPI was successful. Mark the1434514347 * node as having an UNREG_LOGIN in progress to stop···1982119823 * lpfc_sli4_resume_rpi - Remove the rpi bitmask region1982219824 * @ndlp: pointer to lpfc nodelist data structure.1982319825 * @cmpl: completion call-back.1982419824- * @arg: data to load as MBox 'caller buffer information'1982619826+ * @iocbq: data to load as mbox ctx_u information1982519827 *1982619828 * This routine is invoked to remove the memory region that1982719829 * provided rpi via a bitmask.1982819830 **/1982919831int1983019832lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,1983119831- void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)1983319833+ void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *),1983419834+ struct lpfc_iocbq *iocbq)1983219835{1983319836 LPFC_MBOXQ_t *mboxq;1983419837 struct lpfc_hba *phba = ndlp->phba;···1985819859 lpfc_resume_rpi(mboxq, ndlp);1985919860 if (cmpl) {1986019861 mboxq->mbox_cmpl = cmpl;1986119861- mboxq->ctx_buf = arg;1986219862+ mboxq->ctx_u.save_iocb = iocbq;1986219863 } else1986319864 mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;1986419865 mboxq->ctx_ndlp = ndlp;···2067520676 if (lpfc_sli4_dump_cfg_rg23(phba, 
mboxq))2067620677 goto out;2067720678 mqe = &mboxq->u.mqe;2067820678- mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;2067920679+ mp = mboxq->ctx_buf;2067920680 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);2068020681 if (rc)2068120682 goto out;···2103421035 (mb->u.mb.mbxCommand == MBX_REG_VPI))2103521036 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;2103621037 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {2103721037- act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;2103821038+ act_mbx_ndlp = mb->ctx_ndlp;21038210392103921040 /* This reference is local to this routine. The2104021041 * reference is removed at routine exit.···21063210642106421065 mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;2106521066 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {2106621066- ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;2106721067+ ndlp = mb->ctx_ndlp;2106721068 /* Unregister the RPI when mailbox complete */2106821069 mb->mbox_flag |= LPFC_MBX_IMED_UNREG;2106921070 restart_loop = 1;···2108321084 while (!list_empty(&mbox_cmd_list)) {2108421085 list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);2108521086 if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {2108621086- ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;2108721087+ ndlp = mb->ctx_ndlp;2108721088 mb->ctx_ndlp = NULL;2108821089 if (ndlp) {2108921090 spin_lock(&ndlp->lock);
+24-6
drivers/scsi/lpfc/lpfc_sli.h
···11/*******************************************************************22 * This file is part of the Emulex Linux Device Driver for *33 * Fibre Channel Host Bus Adapters. *44- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *44+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *55 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *66 * Copyright (C) 2004-2016 Emulex. All rights reserved. *77 * EMULEX and SLI are trademarks of Emulex. *···182182 struct lpfc_mqe mqe;183183 } u;184184 struct lpfc_vport *vport; /* virtual port pointer */185185- void *ctx_ndlp; /* an lpfc_nodelist pointer */186186- void *ctx_buf; /* an lpfc_dmabuf pointer */187187- void *context3; /* a generic pointer. Code must188188- * accommodate the actual datatype.189189- */185185+ struct lpfc_nodelist *ctx_ndlp; /* caller ndlp pointer */186186+ struct lpfc_dmabuf *ctx_buf; /* caller buffer information */187187+ void *ext_buf; /* extended buffer for extended mbox188188+ * cmds. Not a generic pointer.189189+ * Use for storing virtual address.190190+ */191191+192192+ /* Pointers that are seldom used during mbox execution, but require193193+ * a saved context.194194+ */195195+ union {196196+ unsigned long ox_rx_id; /* Used in els_rsp_rls_acc */197197+ struct lpfc_rdp_context *rdp; /* Used in get_rdp_info */198198+ struct lpfc_lcb_context *lcb; /* Used in set_beacon */199199+ struct completion *mbox_wait; /* Used in issue_mbox_wait */200200+ struct bsg_job_data *dd_data; /* Used in bsg_issue_mbox_cmpl201201+ * and202202+ * bsg_issue_mbox_ext_handle_job203203+ */204204+ struct lpfc_iocbq *save_iocb; /* Used in defer_plogi_acc and205205+ * lpfc_mbx_cmpl_resume_rpi206206+ */207207+ } ctx_u;190208191209 void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);192210 uint8_t mbox_flag;
+4-3
drivers/scsi/lpfc/lpfc_sli4.h
···11/*******************************************************************22 * This file is part of the Emulex Linux Device Driver for *33 * Fibre Channel Host Bus Adapters. *44- * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *44+ * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *55 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *66 * Copyright (C) 2009-2016 Emulex. All rights reserved. *77 * EMULEX and SLI are trademarks of Emulex. *···11181118void lpfc_sli4_remove_rpis(struct lpfc_hba *);11191119void lpfc_sli4_async_event_proc(struct lpfc_hba *);11201120void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);11211121-int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,11221122- void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);11211121+int lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,11221122+ void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *),11231123+ struct lpfc_iocbq *iocbq);11231124void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba);11241125void lpfc_sli4_nvme_pci_offline_aborted(struct lpfc_hba *phba,11251126 struct lpfc_io_buf *lpfc_ncmd);
+1-1
drivers/scsi/lpfc/lpfc_version.h
···2020 * included with this package. *2121 *******************************************************************/22222323-#define LPFC_DRIVER_VERSION "14.4.0.0"2323+#define LPFC_DRIVER_VERSION "14.4.0.1"2424#define LPFC_DRIVER_NAME "lpfc"25252626/* Used for SLI 2/3 */
+5-5
drivers/scsi/lpfc/lpfc_vport.c
···166166 }167167 }168168169169- mp = (struct lpfc_dmabuf *)pmb->ctx_buf;169169+ mp = pmb->ctx_buf;170170 memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));171171 memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,172172 sizeof (struct lpfc_name));···674674 lpfc_free_sysfs_attr(vport);675675 lpfc_debugfs_terminate(vport);676676677677- /* Remove FC host to break driver binding. */678678- fc_remove_host(shost);679679- scsi_remove_host(shost);680680-681677 /* Send the DA_ID and Fabric LOGO to cleanup Nameserver entries. */682678 ndlp = lpfc_findnode_did(vport, Fabric_DID);683679 if (!ndlp)···716720 lpfc_discovery_wait(vport);717721718722skip_logo:723723+724724+ /* Remove FC host to break driver binding. */725725+ fc_remove_host(shost);726726+ scsi_remove_host(shost);719727720728 lpfc_cleanup(vport);721729
···6161 * pmcraid_minor - minor number(s) to use6262 */6363static unsigned int pmcraid_major;6464-static struct class *pmcraid_class;6464+static const struct class pmcraid_class = {6565+ .name = PMCRAID_DEVFILE,6666+};6567static DECLARE_BITMAP(pmcraid_minor, PMCRAID_MAX_ADAPTERS);66686769/*···47254723 if (error)47264724 pmcraid_release_minor(minor);47274725 else47284728- device_create(pmcraid_class, NULL, MKDEV(pmcraid_major, minor),47264726+ device_create(&pmcraid_class, NULL, MKDEV(pmcraid_major, minor),47294727 NULL, "%s%u", PMCRAID_DEVFILE, minor);47304728 return error;47314729}···47414739static void pmcraid_release_chrdev(struct pmcraid_instance *pinstance)47424740{47434741 pmcraid_release_minor(MINOR(pinstance->cdev.dev));47444744- device_destroy(pmcraid_class,47424742+ device_destroy(&pmcraid_class,47454743 MKDEV(pmcraid_major, MINOR(pinstance->cdev.dev)));47464744 cdev_del(&pinstance->cdev);47474745}···53925390 }5393539153945392 pmcraid_major = MAJOR(dev);53955395- pmcraid_class = class_create(PMCRAID_DEVFILE);5396539353975397- if (IS_ERR(pmcraid_class)) {53985398- error = PTR_ERR(pmcraid_class);53945394+ error = class_register(&pmcraid_class);53955395+53965396+ if (error) {53995397 pmcraid_err("failed to register with sysfs, error = %x\n",54005398 error);54015399 goto out_unreg_chrdev;···54045402 error = pmcraid_netlink_init();5405540354065404 if (error) {54075407- class_destroy(pmcraid_class);54055405+ class_unregister(&pmcraid_class);54085406 goto out_unreg_chrdev;54095407 }54105408···5415541354165414 pmcraid_err("failed to register pmcraid driver, error = %x\n",54175415 error);54185418- class_destroy(pmcraid_class);54165416+ class_unregister(&pmcraid_class);54195417 pmcraid_netlink_release();5420541854215419out_unreg_chrdev:···54345432 unregister_chrdev_region(MKDEV(pmcraid_major, 0),54355433 PMCRAID_MAX_ADAPTERS);54365434 pci_unregister_driver(&pmcraid_driver);54375437- class_destroy(pmcraid_class);54355435+ 
class_unregister(&pmcraid_class);54385436}5439543754405438module_init(pmcraid_init);
+12-2
drivers/scsi/qla2xxx/qla_attr.c
···27412741 return;2742274227432743 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {27442744- qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);27442744+ /* Will wait for wind down of adapter */27452745+ ql_dbg(ql_dbg_aer, fcport->vha, 0x900c,27462746+ "%s pci offline detected (id %06x)\n", __func__,27472747+ fcport->d_id.b24);27482748+ qla_pci_set_eeh_busy(fcport->vha);27492749+ qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24,27502750+ 0, WAIT_TARGET);27452751 return;27462752 }27472753}···27692763 vha = fcport->vha;2770276427712765 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {27722772- qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);27662766+ /* Will wait for wind down of adapter */27672767+ ql_dbg(ql_dbg_aer, fcport->vha, 0x900b,27682768+ "%s pci offline detected (id %06x)\n", __func__,27692769+ fcport->d_id.b24);27702770+ qla_pci_set_eeh_busy(vha);27732771 qla2x00_eh_wait_for_pending_commands(fcport->vha, fcport->d_id.b24,27742772 0, WAIT_TARGET);27752773 return;
+1-1
drivers/scsi/qla2xxx/qla_def.h
···8282#include "qla_nvme.h"8383#define QLA2XXX_DRIVER_NAME "qla2xxx"8484#define QLA2XXX_APIDEV "ql2xapidev"8585-#define QLA2XXX_MANUFACTURER "Marvell Semiconductor, Inc."8585+#define QLA2XXX_MANUFACTURER "Marvell"86868787/*8888 * We have MAILBOX_REGISTER_COUNT sized arrays in a few places,
+1-1
drivers/scsi/qla2xxx/qla_gbl.h
···4444extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *);45454646extern int qla24xx_els_dcmd_iocb(scsi_qla_host_t *, int, port_id_t);4747-extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *, bool);4747+extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *);4848extern void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha,4949 struct els_plogi *els_plogi);5050
+65-63
drivers/scsi/qla2xxx/qla_init.c
···11931193 return rval;1194119411951195done_free_sp:11961196- /* ref: INIT */11971197- kref_put(&sp->cmd_kref, qla2x00_sp_release);11961196+ /*11971197+ * use qla24xx_async_gnl_sp_done to purge all pending gnl request.11981198+ * kref_put is call behind the scene.11991199+ */12001200+ sp->u.iocb_cmd.u.mbx.in_mb[0] = MBS_COMMAND_ERROR;12011201+ qla24xx_async_gnl_sp_done(sp, QLA_COMMAND_ERROR);11981202 fcport->flags &= ~(FCF_ASYNC_SENT);11991203done:12001204 fcport->flags &= ~(FCF_ASYNC_ACTIVE);···26692665 return rval;26702666}2671266726682668+static void qla_enable_fce_trace(scsi_qla_host_t *vha)26692669+{26702670+ int rval;26712671+ struct qla_hw_data *ha = vha->hw;26722672+26732673+ if (ha->fce) {26742674+ ha->flags.fce_enabled = 1;26752675+ memset(ha->fce, 0, fce_calc_size(ha->fce_bufs));26762676+ rval = qla2x00_enable_fce_trace(vha,26772677+ ha->fce_dma, ha->fce_bufs, ha->fce_mb, &ha->fce_bufs);26782678+26792679+ if (rval) {26802680+ ql_log(ql_log_warn, vha, 0x8033,26812681+ "Unable to reinitialize FCE (%d).\n", rval);26822682+ ha->flags.fce_enabled = 0;26832683+ }26842684+ }26852685+}26862686+26872687+static void qla_enable_eft_trace(scsi_qla_host_t *vha)26882688+{26892689+ int rval;26902690+ struct qla_hw_data *ha = vha->hw;26912691+26922692+ if (ha->eft) {26932693+ memset(ha->eft, 0, EFT_SIZE);26942694+ rval = qla2x00_enable_eft_trace(vha, ha->eft_dma, EFT_NUM_BUFFERS);26952695+26962696+ if (rval) {26972697+ ql_log(ql_log_warn, vha, 0x8034,26982698+ "Unable to reinitialize EFT (%d).\n", rval);26992699+ }27002700+ }27012701+}26722702/*26732703* qla2x00_initialize_adapter26742704* Initialize board.···37063668}3707366937083670static void37093709-qla2x00_init_fce_trace(scsi_qla_host_t *vha)36713671+qla2x00_alloc_fce_trace(scsi_qla_host_t *vha)37103672{37113711- int rval;37123673 dma_addr_t tc_dma;37133674 void *tc;37143675 struct qla_hw_data *ha = vha->hw;···37363699 return;37373700 }3738370137393739- rval = qla2x00_enable_fce_trace(vha, tc_dma, 
FCE_NUM_BUFFERS,37403740- ha->fce_mb, &ha->fce_bufs);37413741- if (rval) {37423742- ql_log(ql_log_warn, vha, 0x00bf,37433743- "Unable to initialize FCE (%d).\n", rval);37443744- dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma);37453745- return;37463746- }37473747-37483702 ql_dbg(ql_dbg_init, vha, 0x00c0,37493703 "Allocated (%d KB) for FCE...\n", FCE_SIZE / 1024);3750370437513751- ha->flags.fce_enabled = 1;37523705 ha->fce_dma = tc_dma;37533706 ha->fce = tc;37073707+ ha->fce_bufs = FCE_NUM_BUFFERS;37543708}3755370937563710static void37573757-qla2x00_init_eft_trace(scsi_qla_host_t *vha)37113711+qla2x00_alloc_eft_trace(scsi_qla_host_t *vha)37583712{37593759- int rval;37603713 dma_addr_t tc_dma;37613714 void *tc;37623715 struct qla_hw_data *ha = vha->hw;···37713744 return;37723745 }3773374637743774- rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);37753775- if (rval) {37763776- ql_log(ql_log_warn, vha, 0x00c2,37773777- "Unable to initialize EFT (%d).\n", rval);37783778- dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, tc_dma);37793779- return;37803780- }37813781-37823747 ql_dbg(ql_dbg_init, vha, 0x00c3,37833748 "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);3784374937853750 ha->eft_dma = tc_dma;37863751 ha->eft = tc;37873787-}37883788-37893789-static void37903790-qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)37913791-{37923792- qla2x00_init_fce_trace(vha);37933793- qla2x00_init_eft_trace(vha);37943752}3795375337963754void···38323820 if (ha->tgt.atio_ring)38333821 mq_size += ha->tgt.atio_q_length * sizeof(request_t);3834382238353835- qla2x00_init_fce_trace(vha);38233823+ qla2x00_alloc_fce_trace(vha);38363824 if (ha->fce)38373825 fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;38383838- qla2x00_init_eft_trace(vha);38263826+ qla2x00_alloc_eft_trace(vha);38393827 if (ha->eft)38403828 eft_size = EFT_SIZE;38413829 }···42654253 struct qla_hw_data *ha = vha->hw;42664254 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;42674255 unsigned long 
flags;42684268- uint16_t fw_major_version;42694256 int done_once = 0;4270425742714258 if (IS_P3P_TYPE(ha)) {···43314320 goto failed;4332432143334322enable_82xx_npiv:43344334- fw_major_version = ha->fw_major_version;43354323 if (IS_P3P_TYPE(ha))43364324 qla82xx_check_md_needed(vha);43374325 else···43594349 if (rval != QLA_SUCCESS)43604350 goto failed;4361435143624362- if (!fw_major_version && !(IS_P3P_TYPE(ha)))43634363- qla2x00_alloc_offload_mem(vha);43644364-43654352 if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))43664353 qla2x00_alloc_fw_dump(vha);4367435443554355+ qla_enable_fce_trace(vha);43564356+ qla_enable_eft_trace(vha);43684357 } else {43694358 goto failed;43704359 }···74967487int74977488qla2x00_abort_isp(scsi_qla_host_t *vha)74987489{74997499- int rval;75007490 uint8_t status = 0;75017491 struct qla_hw_data *ha = vha->hw;75027492 struct scsi_qla_host *vp, *tvp;75037493 struct req_que *req = ha->req_q_map[0];75047494 unsigned long flags;74957495+ fc_port_t *fcport;7505749675067497 if (vha->flags.online) {75077498 qla2x00_abort_isp_cleanup(vha);···75707561 "ISP Abort - ISP reg disconnect post nvmram config, exiting.\n");75717562 return status;75727563 }75647564+75657565+ /* User may have updated [fcp|nvme] prefer in flash */75667566+ list_for_each_entry(fcport, &vha->vp_fcports, list) {75677567+ if (NVME_PRIORITY(ha, fcport))75687568+ fcport->do_prli_nvme = 1;75697569+ else75707570+ fcport->do_prli_nvme = 0;75717571+ }75727572+75737573 if (!qla2x00_restart_isp(vha)) {75747574 clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);75757575···7599758176007582 if (IS_QLA81XX(ha) || IS_QLA8031(ha))76017583 qla2x00_get_fw_version(vha);76027602- if (ha->fce) {76037603- ha->flags.fce_enabled = 1;76047604- memset(ha->fce, 0,76057605- fce_calc_size(ha->fce_bufs));76067606- rval = qla2x00_enable_fce_trace(vha,76077607- ha->fce_dma, ha->fce_bufs, ha->fce_mb,76087608- &ha->fce_bufs);76097609- if (rval) {76107610- ql_log(ql_log_warn, vha, 0x8033,76117611- "Unable to reinitialize 
FCE "76127612- "(%d).\n", rval);76137613- ha->flags.fce_enabled = 0;76147614- }76157615- }7616758476177617- if (ha->eft) {76187618- memset(ha->eft, 0, EFT_SIZE);76197619- rval = qla2x00_enable_eft_trace(vha,76207620- ha->eft_dma, EFT_NUM_BUFFERS);76217621- if (rval) {76227622- ql_log(ql_log_warn, vha, 0x8034,76237623- "Unable to reinitialize EFT "76247624- "(%d).\n", rval);76257625- }76267626- }76277585 } else { /* failed the ISP abort */76287586 vha->flags.online = 1;76297587 if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {···76487654 if (vp->vp_idx) {76497655 atomic_inc(&vp->vref_count);76507656 spin_unlock_irqrestore(&ha->vport_slock, flags);76577657+76587658+ /* User may have updated [fcp|nvme] prefer in flash */76597659+ list_for_each_entry(fcport, &vp->vp_fcports, list) {76607660+ if (NVME_PRIORITY(ha, fcport))76617661+ fcport->do_prli_nvme = 1;76627662+ else76637663+ fcport->do_prli_nvme = 0;76647664+ }7651766576527666 qla2x00_vp_abort_isp(vp);76537667
···16421642}16431643EXPORT_SYMBOL(scsi_add_device);1644164416451645+int scsi_resume_device(struct scsi_device *sdev)16461646+{16471647+ struct device *dev = &sdev->sdev_gendev;16481648+ int ret = 0;16491649+16501650+ device_lock(dev);16511651+16521652+ /*16531653+ * Bail out if the device or its queue are not running. Otherwise,16541654+ * the rescan may block waiting for commands to be executed, with us16551655+ * holding the device lock. This can result in a potential deadlock16561656+ * in the power management core code when system resume is on-going.16571657+ */16581658+ if (sdev->sdev_state != SDEV_RUNNING ||16591659+ blk_queue_pm_only(sdev->request_queue)) {16601660+ ret = -EWOULDBLOCK;16611661+ goto unlock;16621662+ }16631663+16641664+ if (dev->driver && try_module_get(dev->driver->owner)) {16651665+ struct scsi_driver *drv = to_scsi_driver(dev->driver);16661666+16671667+ if (drv->resume)16681668+ ret = drv->resume(dev);16691669+ module_put(dev->driver->owner);16701670+ }16711671+16721672+unlock:16731673+ device_unlock(dev);16741674+16751675+ return ret;16761676+}16771677+EXPORT_SYMBOL(scsi_resume_device);16781678+16451679int scsi_rescan_device(struct scsi_device *sdev)16461680{16471681 struct device *dev = &sdev->sdev_gendev;
···214214215215 res = dfc->power_ops->get_real_power(df, power, freq, voltage);216216 if (!res) {217217- state = dfc->capped_state;217217+ state = dfc->max_state - dfc->capped_state;218218219219 /* Convert EM power into milli-Watts first */220220 rcu_read_lock();
+2-17
drivers/thermal/thermal_trip.c
···6565{6666 const struct thermal_trip *trip;6767 int low = -INT_MAX, high = INT_MAX;6868- bool same_trip = false;6968 int ret;70697170 lockdep_assert_held(&tz->lock);···7374 return;74757576 for_each_trip(tz, trip) {7676- bool low_set = false;7777 int trip_low;78787979 trip_low = trip->temperature - trip->hysteresis;80808181- if (trip_low < tz->temperature && trip_low > low) {8181+ if (trip_low < tz->temperature && trip_low > low)8282 low = trip_low;8383- low_set = true;8484- same_trip = false;8585- }86838784 if (trip->temperature > tz->temperature &&8888- trip->temperature < high) {8585+ trip->temperature < high)8986 high = trip->temperature;9090- same_trip = low_set;9191- }9287 }93889489 /* No need to change trip points */9590 if (tz->prev_low_trip == low && tz->prev_high_trip == high)9696- return;9797-9898- /*9999- * If "high" and "low" are the same, skip the change unless this is the100100- * first time.101101- */102102- if (same_trip && (tz->prev_low_trip != -INT_MAX ||103103- tz->prev_high_trip != INT_MAX))10491 return;1059210693 tz->prev_low_trip = low;
+1-1
drivers/ufs/core/ufs-mcq.c
···94949595 val = ufshcd_readl(hba, REG_UFS_MCQ_CFG);9696 val &= ~MCQ_CFG_MAC_MASK;9797- val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds);9797+ val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds - 1);9898 ufshcd_writel(hba, val, REG_UFS_MCQ_CFG);9999}100100EXPORT_SYMBOL_GPL(ufshcd_mcq_config_mac);
+4-2
drivers/ufs/host/ufs-qcom.c
···1210121012111211 list_for_each_entry(clki, head, list) {12121212 if (!IS_ERR_OR_NULL(clki->clk) &&12131213- !strcmp(clki->name, "core_clk_unipro")) {12141214- if (is_scale_up)12131213+ !strcmp(clki->name, "core_clk_unipro")) {12141214+ if (!clki->max_freq)12151215+ cycles_in_1us = 150; /* default for backwards compatibility */12161216+ else if (is_scale_up)12151217 cycles_in_1us = ceil(clki->max_freq, (1000 * 1000));12161218 else12171219 cycles_in_1us = ceil(clk_get_rate(clki->clk), (1000 * 1000));
···485485static int service_outstanding_interrupt(struct wdm_device *desc)486486{487487 int rv = 0;488488+ int used;488489489490 /* submit read urb only if the device is waiting for it */490491 if (!desc->resp_count || !--desc->resp_count)···500499 goto out;501500 }502501503503- set_bit(WDM_RESPONDING, &desc->flags);502502+ used = test_and_set_bit(WDM_RESPONDING, &desc->flags);503503+ if (used)504504+ goto out;505505+504506 spin_unlock_irq(&desc->iuspin);505507 rv = usb_submit_urb(desc->response, GFP_KERNEL);506508 spin_lock_irq(&desc->iuspin);
+16-7
drivers/usb/core/hub.c
···130130#define HUB_DEBOUNCE_STEP 25131131#define HUB_DEBOUNCE_STABLE 100132132133133-static void hub_release(struct kref *kref);134133static int usb_reset_and_verify_device(struct usb_device *udev);135134static int hub_port_disable(struct usb_hub *hub, int port1, int set_state);136135static bool hub_port_warm_reset_required(struct usb_hub *hub, int port1,···719720 */720721 intf = to_usb_interface(hub->intfdev);721722 usb_autopm_get_interface_no_resume(intf);722722- kref_get(&hub->kref);723723+ hub_get(hub);723724724725 if (queue_work(hub_wq, &hub->events))725726 return;726727727728 /* the work has already been scheduled */728729 usb_autopm_put_interface_async(intf);729729- kref_put(&hub->kref, hub_release);730730+ hub_put(hub);730731}731732732733void usb_kick_hub_wq(struct usb_device *hdev)···10941095 goto init2;10951096 goto init3;10961097 }10971097- kref_get(&hub->kref);10981098+ hub_get(hub);1098109910991100 /* The superspeed hub except for root hub has to use Hub Depth11001101 * value as an offset into the route string to locate the bits···13421343 device_unlock(&hdev->dev);13431344 }1344134513451345- kref_put(&hub->kref, hub_release);13461346+ hub_put(hub);13461347}1347134813481349/* Implement the continuations for the delays above */···17581759 kfree(hub);17591760}1760176117621762+void hub_get(struct usb_hub *hub)17631763+{17641764+ kref_get(&hub->kref);17651765+}17661766+17671767+void hub_put(struct usb_hub *hub)17681768+{17691769+ kref_put(&hub->kref, hub_release);17701770+}17711771+17611772static unsigned highspeed_hubs;1762177317631774static void hub_disconnect(struct usb_interface *intf)···1816180718171808 onboard_dev_destroy_pdevs(&hub->onboard_devs);1818180918191819- kref_put(&hub->kref, hub_release);18101810+ hub_put(hub);18201811}1821181218221813static bool hub_descriptor_is_sane(struct usb_host_interface *desc)···5943593459445935 /* Balance the stuff in kick_hub_wq() and allow autosuspend */59455936 usb_autopm_put_interface(intf);59465946- 
kref_put(&hub->kref, hub_release);59375937+ hub_put(hub);5947593859485939 kcov_remote_stop();59495940}
+2
drivers/usb/core/hub.h
···129129extern int usb_hub_set_port_power(struct usb_device *hdev, struct usb_hub *hub,130130 int port1, bool set);131131extern struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev);132132+extern void hub_get(struct usb_hub *hub);133133+extern void hub_put(struct usb_hub *hub);132134extern int hub_port_debounce(struct usb_hub *hub, int port1,133135 bool must_be_connected);134136extern int usb_clear_port_feature(struct usb_device *hdev,
+34-4
drivers/usb/core/port.c
···5656 u16 portstatus, unused;5757 bool disabled;5858 int rc;5959+ struct kernfs_node *kn;59606161+ hub_get(hub);6062 rc = usb_autopm_get_interface(intf);6163 if (rc < 0)6262- return rc;6464+ goto out_hub_get;63656666+ /*6767+ * Prevent deadlock if another process is concurrently6868+ * trying to unregister hdev.6969+ */7070+ kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);7171+ if (!kn) {7272+ rc = -ENODEV;7373+ goto out_autopm;7474+ }6475 usb_lock_device(hdev);6576 if (hub->disconnected) {6677 rc = -ENODEV;···8170 usb_hub_port_status(hub, port1, &portstatus, &unused);8271 disabled = !usb_port_is_power_on(hub, portstatus);83728484-out_hdev_lock:7373+ out_hdev_lock:8574 usb_unlock_device(hdev);7575+ sysfs_unbreak_active_protection(kn);7676+ out_autopm:8677 usb_autopm_put_interface(intf);7878+ out_hub_get:7979+ hub_put(hub);87808881 if (rc)8982 return rc;···10590 int port1 = port_dev->portnum;10691 bool disabled;10792 int rc;9393+ struct kernfs_node *kn;1089410995 rc = kstrtobool(buf, &disabled);11096 if (rc)11197 return rc;112989999+ hub_get(hub);113100 rc = usb_autopm_get_interface(intf);114101 if (rc < 0)115115- return rc;102102+ goto out_hub_get;116103104104+ /*105105+ * Prevent deadlock if another process is concurrently106106+ * trying to unregister hdev.107107+ */108108+ kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);109109+ if (!kn) {110110+ rc = -ENODEV;111111+ goto out_autopm;112112+ }117113 usb_lock_device(hdev);118114 if (hub->disconnected) {119115 rc = -ENODEV;···145119 if (!rc)146120 rc = count;147121148148-out_hdev_lock:122122+ out_hdev_lock:149123 usb_unlock_device(hdev);124124+ sysfs_unbreak_active_protection(kn);125125+ out_autopm:150126 usb_autopm_put_interface(intf);127127+ out_hub_get:128128+ hub_put(hub);151129152130 return rc;153131}
+13-3
drivers/usb/core/sysfs.c
···12171217{12181218 struct usb_interface *intf = to_usb_interface(dev);12191219 bool val;12201220+ struct kernfs_node *kn;1220122112211222 if (kstrtobool(buf, &val) != 0)12221223 return -EINVAL;1223122412241224- if (val)12251225+ if (val) {12251226 usb_authorize_interface(intf);12261226- else12271227- usb_deauthorize_interface(intf);12271227+ } else {12281228+ /*12291229+ * Prevent deadlock if another process is concurrently12301230+ * trying to unregister intf.12311231+ */12321232+ kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);12331233+ if (kn) {12341234+ usb_deauthorize_interface(intf);12351235+ sysfs_unbreak_active_protection(kn);12361236+ }12371237+ }1228123812291239 return count;12301240}
+14
drivers/usb/dwc2/core.h
···735735 * struct dwc2_hregs_backup - Holds host registers state before736736 * entering partial power down737737 * @hcfg: Backup of HCFG register738738+ * @hflbaddr: Backup of HFLBADDR register738739 * @haintmsk: Backup of HAINTMSK register740740+ * @hcchar: Backup of HCCHAR register741741+ * @hcsplt: Backup of HCSPLT register739742 * @hcintmsk: Backup of HCINTMSK register743743+ * @hctsiz: Backup of HCTSIZ register744744+ * @hdma: Backup of HCDMA register745745+ * @hcdmab: Backup of HCDMAB register740746 * @hprt0: Backup of HPTR0 register741747 * @hfir: Backup of HFIR register742748 * @hptxfsiz: Backup of HPTXFSIZ register···750744 */751745struct dwc2_hregs_backup {752746 u32 hcfg;747747+ u32 hflbaddr;753748 u32 haintmsk;749749+ u32 hcchar[MAX_EPS_CHANNELS];750750+ u32 hcsplt[MAX_EPS_CHANNELS];754751 u32 hcintmsk[MAX_EPS_CHANNELS];752752+ u32 hctsiz[MAX_EPS_CHANNELS];753753+ u32 hcidma[MAX_EPS_CHANNELS];754754+ u32 hcidmab[MAX_EPS_CHANNELS];755755 u32 hprt0;756756 u32 hfir;757757 u32 hptxfsiz;···11041092 bool needs_byte_swap;1105109311061094 /* DWC OTG HW Release versions */10951095+#define DWC2_CORE_REV_4_30a 0x4f54430a11071096#define DWC2_CORE_REV_2_71a 0x4f54271a11081097#define DWC2_CORE_REV_2_72a 0x4f54272a11091098#define DWC2_CORE_REV_2_80a 0x4f54280a···13441331int dwc2_restore_global_registers(struct dwc2_hsotg *hsotg);1345133213461333void dwc2_enable_acg(struct dwc2_hsotg *hsotg);13341334+void dwc2_wakeup_from_lpm_l1(struct dwc2_hsotg *hsotg, bool remotewakeup);1347133513481336/* This function should be called on every hardware interrupt. */13491337irqreturn_t dwc2_handle_common_intr(int irq, void *dev);
+48-24
drivers/usb/dwc2/core_intr.c
···312312313313 /* Exit gadget mode clock gating. */314314 if (hsotg->params.power_down ==315315- DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended)315315+ DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended &&316316+ !hsotg->params.no_clock_gating)316317 dwc2_gadget_exit_clock_gating(hsotg, 0);317318 }318319···338337 * @hsotg: Programming view of DWC_otg controller339338 *340339 */341341-static void dwc2_wakeup_from_lpm_l1(struct dwc2_hsotg *hsotg)340340+void dwc2_wakeup_from_lpm_l1(struct dwc2_hsotg *hsotg, bool remotewakeup)342341{343342 u32 glpmcfg;344344- u32 i = 0;343343+ u32 pcgctl;344344+ u32 dctl;345345346346 if (hsotg->lx_state != DWC2_L1) {347347 dev_err(hsotg->dev, "Core isn't in DWC2_L1 state\n");···351349352350 glpmcfg = dwc2_readl(hsotg, GLPMCFG);353351 if (dwc2_is_device_mode(hsotg)) {354354- dev_dbg(hsotg->dev, "Exit from L1 state\n");352352+ dev_dbg(hsotg->dev, "Exit from L1 state, remotewakeup=%d\n", remotewakeup);355353 glpmcfg &= ~GLPMCFG_ENBLSLPM;356356- glpmcfg &= ~GLPMCFG_HIRD_THRES_EN;354354+ glpmcfg &= ~GLPMCFG_HIRD_THRES_MASK;357355 dwc2_writel(hsotg, glpmcfg, GLPMCFG);358356359359- do {360360- glpmcfg = dwc2_readl(hsotg, GLPMCFG);357357+ pcgctl = dwc2_readl(hsotg, PCGCTL);358358+ pcgctl &= ~PCGCTL_ENBL_SLEEP_GATING;359359+ dwc2_writel(hsotg, pcgctl, PCGCTL);361360362362- if (!(glpmcfg & (GLPMCFG_COREL1RES_MASK |363363- GLPMCFG_L1RESUMEOK | GLPMCFG_SLPSTS)))364364- break;361361+ glpmcfg = dwc2_readl(hsotg, GLPMCFG);362362+ if (glpmcfg & GLPMCFG_ENBESL) {363363+ glpmcfg |= GLPMCFG_RSTRSLPSTS;364364+ dwc2_writel(hsotg, glpmcfg, GLPMCFG);365365+ }365366366366- udelay(1);367367- } while (++i < 200);367367+ if (remotewakeup) {368368+ if (dwc2_hsotg_wait_bit_set(hsotg, GLPMCFG, GLPMCFG_L1RESUMEOK, 1000)) {369369+ dev_warn(hsotg->dev, "%s: timeout GLPMCFG_L1RESUMEOK\n", __func__);370370+ goto fail;371371+ return;372372+ }368373369369- if (i == 200) {370370- dev_err(hsotg->dev, "Failed to exit L1 sleep state in 200us.\n");374374+ dctl = 
dwc2_readl(hsotg, DCTL);375375+ dctl |= DCTL_RMTWKUPSIG;376376+ dwc2_writel(hsotg, dctl, DCTL);377377+378378+ if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS, GINTSTS_WKUPINT, 1000)) {379379+ dev_warn(hsotg->dev, "%s: timeout GINTSTS_WKUPINT\n", __func__);380380+ goto fail;381381+ return;382382+ }383383+ }384384+385385+ glpmcfg = dwc2_readl(hsotg, GLPMCFG);386386+ if (glpmcfg & GLPMCFG_COREL1RES_MASK || glpmcfg & GLPMCFG_SLPSTS ||387387+ glpmcfg & GLPMCFG_L1RESUMEOK) {388388+ goto fail;371389 return;372390 }373373- dwc2_gadget_init_lpm(hsotg);391391+392392+ /* Inform gadget to exit from L1 */393393+ call_gadget(hsotg, resume);394394+ /* Change to L0 state */395395+ hsotg->lx_state = DWC2_L0;396396+ hsotg->bus_suspended = false;397397+fail: dwc2_gadget_init_lpm(hsotg);374398 } else {375399 /* TODO */376400 dev_err(hsotg->dev, "Host side LPM is not supported.\n");377401 return;378402 }379379-380380- /* Change to L0 state */381381- hsotg->lx_state = DWC2_L0;382382-383383- /* Inform gadget to exit from L1 */384384- call_gadget(hsotg, resume);385403}386404387405/*···422400 dev_dbg(hsotg->dev, "%s lxstate = %d\n", __func__, hsotg->lx_state);423401424402 if (hsotg->lx_state == DWC2_L1) {425425- dwc2_wakeup_from_lpm_l1(hsotg);403403+ dwc2_wakeup_from_lpm_l1(hsotg, false);426404 return;427405 }428406···445423446424 /* Exit gadget mode clock gating. */447425 if (hsotg->params.power_down ==448448- DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended)426426+ DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended &&427427+ !hsotg->params.no_clock_gating)449428 dwc2_gadget_exit_clock_gating(hsotg, 0);450429 } else {451430 /* Change to L0 state */···463440 }464441465442 if (hsotg->params.power_down ==466466- DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended)443443+ DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended &&444444+ !hsotg->params.no_clock_gating)467445 dwc2_host_exit_clock_gating(hsotg, 1);468446469447 /*
+10
drivers/usb/dwc2/gadget.c
···14151415 ep->name, req, req->length, req->buf, req->no_interrupt,14161416 req->zero, req->short_not_ok);1417141714181418+ if (hs->lx_state == DWC2_L1) {14191419+ dwc2_wakeup_from_lpm_l1(hs, true);14201420+ }14211421+14181422 /* Prevent new request submission when controller is suspended */14191423 if (hs->lx_state != DWC2_L0) {14201424 dev_dbg(hs->dev, "%s: submit request only in active state\n",···37333729 /* This event must be used only if controller is suspended */37343730 if (hsotg->in_ppd && hsotg->lx_state == DWC2_L2)37353731 dwc2_exit_partial_power_down(hsotg, 0, true);37323732+37333733+ /* Exit gadget mode clock gating. */37343734+ if (hsotg->params.power_down ==37353735+ DWC2_POWER_DOWN_PARAM_NONE && hsotg->bus_suspended &&37363736+ !hsotg->params.no_clock_gating)37373737+ dwc2_gadget_exit_clock_gating(hsotg, 0);3736373837373739 hsotg->lx_state = DWC2_L0;37383740 }
+40-9
drivers/usb/dwc2/hcd.c
···27012701 hsotg->available_host_channels--;27022702 }27032703 qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);27042704- if (dwc2_assign_and_init_hc(hsotg, qh))27042704+ if (dwc2_assign_and_init_hc(hsotg, qh)) {27052705+ if (hsotg->params.uframe_sched)27062706+ hsotg->available_host_channels++;27052707 break;27082708+ }2706270927072710 /*27082711 * Move the QH from the periodic ready schedule to the···27382735 hsotg->available_host_channels--;27392736 }2740273727412741- if (dwc2_assign_and_init_hc(hsotg, qh))27382738+ if (dwc2_assign_and_init_hc(hsotg, qh)) {27392739+ if (hsotg->params.uframe_sched)27402740+ hsotg->available_host_channels++;27422741 break;27422742+ }2743274327442744 /*27452745 * Move the QH from the non-periodic inactive schedule to the···41494143 urb->actual_length);4150414441514145 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {41464146+ if (!hsotg->params.dma_desc_enable)41474147+ urb->start_frame = qtd->qh->start_active_frame;41524148 urb->error_count = dwc2_hcd_urb_get_error_count(qtd->urb);41534149 for (i = 0; i < urb->number_of_packets; ++i) {41544150 urb->iso_frame_desc[i].actual_length =···46574649 }4658465046594651 if (hsotg->params.power_down == DWC2_POWER_DOWN_PARAM_NONE &&46604660- hsotg->bus_suspended) {46524652+ hsotg->bus_suspended && !hsotg->params.no_clock_gating) {46614653 if (dwc2_is_device_mode(hsotg))46624654 dwc2_gadget_exit_clock_gating(hsotg, 0);46634655 else···54145406 /* Backup Host regs */54155407 hr = &hsotg->hr_backup;54165408 hr->hcfg = dwc2_readl(hsotg, HCFG);54095409+ hr->hflbaddr = dwc2_readl(hsotg, HFLBADDR);54175410 hr->haintmsk = dwc2_readl(hsotg, HAINTMSK);54185418- for (i = 0; i < hsotg->params.host_channels; ++i)54115411+ for (i = 0; i < hsotg->params.host_channels; ++i) {54125412+ hr->hcchar[i] = dwc2_readl(hsotg, HCCHAR(i));54135413+ hr->hcsplt[i] = dwc2_readl(hsotg, HCSPLT(i));54195414 hr->hcintmsk[i] = dwc2_readl(hsotg, HCINTMSK(i));54155415+ hr->hctsiz[i] = dwc2_readl(hsotg, 
HCTSIZ(i));54165416+ hr->hcidma[i] = dwc2_readl(hsotg, HCDMA(i));54175417+ hr->hcidmab[i] = dwc2_readl(hsotg, HCDMAB(i));54185418+ }5420541954215420 hr->hprt0 = dwc2_read_hprt0(hsotg);54225421 hr->hfir = dwc2_readl(hsotg, HFIR);···54575442 hr->valid = false;5458544354595444 dwc2_writel(hsotg, hr->hcfg, HCFG);54455445+ dwc2_writel(hsotg, hr->hflbaddr, HFLBADDR);54605446 dwc2_writel(hsotg, hr->haintmsk, HAINTMSK);5461544754625462- for (i = 0; i < hsotg->params.host_channels; ++i)54485448+ for (i = 0; i < hsotg->params.host_channels; ++i) {54495449+ dwc2_writel(hsotg, hr->hcchar[i], HCCHAR(i));54505450+ dwc2_writel(hsotg, hr->hcsplt[i], HCSPLT(i));54635451 dwc2_writel(hsotg, hr->hcintmsk[i], HCINTMSK(i));54525452+ dwc2_writel(hsotg, hr->hctsiz[i], HCTSIZ(i));54535453+ dwc2_writel(hsotg, hr->hcidma[i], HCDMA(i));54545454+ dwc2_writel(hsotg, hr->hcidmab[i], HCDMAB(i));54555455+ }5464545654655457 dwc2_writel(hsotg, hr->hprt0, HPRT0);54665458 dwc2_writel(hsotg, hr->hfir, HFIR);···56425620 dwc2_writel(hsotg, gpwrdn, GPWRDN);5643562156445622 /* De-assert Wakeup Logic */56455645- gpwrdn = dwc2_readl(hsotg, GPWRDN);56465646- gpwrdn &= ~GPWRDN_PMUACTV;56475647- dwc2_writel(hsotg, gpwrdn, GPWRDN);56485648- udelay(10);56235623+ if (!(rem_wakeup && hsotg->hw_params.snpsid >= DWC2_CORE_REV_4_30a)) {56245624+ gpwrdn = dwc2_readl(hsotg, GPWRDN);56255625+ gpwrdn &= ~GPWRDN_PMUACTV;56265626+ dwc2_writel(hsotg, gpwrdn, GPWRDN);56275627+ udelay(10);56285628+ }5649562956505630 hprt0 = hr->hprt0;56515631 hprt0 |= HPRT0_PWR;···56725648 hprt0 |= HPRT0_RES;56735649 dwc2_writel(hsotg, hprt0, HPRT0);5674565056515651+ /* De-assert Wakeup Logic */56525652+ if ((rem_wakeup && hsotg->hw_params.snpsid >= DWC2_CORE_REV_4_30a)) {56535653+ gpwrdn = dwc2_readl(hsotg, GPWRDN);56545654+ gpwrdn &= ~GPWRDN_PMUACTV;56555655+ dwc2_writel(hsotg, gpwrdn, GPWRDN);56565656+ udelay(10);56575657+ }56755658 /* Wait for Resume time and then program HPRT again */56765659 mdelay(100);56775660 hprt0 &= ~HPRT0_RES;
+11-6
drivers/usb/dwc2/hcd_ddma.c
···559559 idx = qh->td_last;560560 inc = qh->host_interval;561561 hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);562562- cur_idx = dwc2_frame_list_idx(hsotg->frame_number);562562+ cur_idx = idx;563563 next_idx = dwc2_desclist_idx_inc(qh->td_last, inc, qh->dev_speed);564564565565 /*···866866{867867 struct dwc2_dma_desc *dma_desc;868868 struct dwc2_hcd_iso_packet_desc *frame_desc;869869+ u16 frame_desc_idx;870870+ struct urb *usb_urb = qtd->urb->priv;869871 u16 remain = 0;870872 int rc = 0;871873···880878 DMA_FROM_DEVICE);881879882880 dma_desc = &qh->desc_list[idx];881881+ frame_desc_idx = (idx - qtd->isoc_td_first) & (usb_urb->number_of_packets - 1);883882884884- frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];883883+ frame_desc = &qtd->urb->iso_descs[frame_desc_idx];884884+ if (idx == qtd->isoc_td_first)885885+ usb_urb->start_frame = dwc2_hcd_get_frame_number(hsotg);885886 dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);886887 if (chan->ep_is_in)887888 remain = (dma_desc->status & HOST_DMA_ISOC_NBYTES_MASK) >>···905900 frame_desc->status = 0;906901 }907902908908- if (++qtd->isoc_frame_index == qtd->urb->packet_count) {903903+ if (++qtd->isoc_frame_index == usb_urb->number_of_packets) {909904 /*910905 * urb->status is not used for isoc transfers here. The911906 * individual frame_desc status are used instead.···10101005 return;10111006 idx = dwc2_desclist_idx_inc(idx, qh->host_interval,10121007 chan->speed);10131013- if (!rc)10081008+ if (rc == 0)10141009 continue;1015101010161016- if (rc == DWC2_CMPL_DONE)10171017- break;10111011+ if (rc == DWC2_CMPL_DONE || rc == DWC2_CMPL_STOP)10121012+ goto stop_scan;1018101310191014 /* rc == DWC2_CMPL_STOP */10201015
···11331133 * 3 - Reserved11341134 * @dis_metastability_quirk: set to disable metastability quirk.11351135 * @dis_split_quirk: set to disable split boundary.11361136+ * @sys_wakeup: set if the device may do system wakeup.11361137 * @wakeup_configured: set if the device is configured for remote wakeup.11371138 * @suspended: set to track suspend event due to U3/L2.11381139 * @imod_interval: set the interrupt moderation interval in 250ns···1358135713591358 unsigned dis_split_quirk:1;13601359 unsigned async_callbacks:1;13601360+ unsigned sys_wakeup:1;13611361 unsigned wakeup_configured:1;13621362 unsigned suspended:1;13631363
···29552955 dwc->gadget_driver = driver;29562956 spin_unlock_irqrestore(&dwc->lock, flags);2957295729582958+ if (dwc->sys_wakeup)29592959+ device_wakeup_enable(dwc->sysdev);29602960+29582961 return 0;29592962}29602963···29722969{29732970 struct dwc3 *dwc = gadget_to_dwc(g);29742971 unsigned long flags;29722972+29732973+ if (dwc->sys_wakeup)29742974+ device_wakeup_disable(dwc->sysdev);2975297529762976 spin_lock_irqsave(&dwc->lock, flags);29772977 dwc->gadget_driver = NULL;···46564650 dwc3_gadget_set_ssp_rate(dwc->gadget, dwc->max_ssp_rate);46574651 else46584652 dwc3_gadget_set_speed(dwc->gadget, dwc->maximum_speed);46534653+46544654+ /* No system wakeup if no gadget driver bound */46554655+ if (dwc->sys_wakeup)46564656+ device_wakeup_disable(dwc->sysdev);4659465746604658 return 0;46614659
+11
drivers/usb/dwc3/host.c
···173173 goto err;174174 }175175176176+ if (dwc->sys_wakeup) {177177+ /* Restore wakeup setting if switched from device */178178+ device_wakeup_enable(dwc->sysdev);179179+180180+ /* Pass on wakeup setting to the new xhci platform device */181181+ device_init_wakeup(&xhci->dev, true);182182+ }183183+176184 return 0;177185err:178186 platform_device_put(xhci);···189181190182void dwc3_host_exit(struct dwc3 *dwc)191183{184184+ if (dwc->sys_wakeup)185185+ device_init_wakeup(&dwc->xhci->dev, false);186186+192187 platform_device_unregister(dwc->xhci);193188 dwc->xhci = NULL;194189}
+3-1
drivers/usb/gadget/udc/core.c
···292292{293293 int ret = 0;294294295295- if (WARN_ON_ONCE(!ep->enabled && ep->address)) {295295+ if (!ep->enabled && ep->address) {296296+ pr_debug("USB gadget: queue request to disabled ep 0x%x (%s)\n",297297+ ep->address, ep->name);296298 ret = -ESHUTDOWN;297299 goto out;298300 }
+9-13
drivers/usb/misc/usb-ljca.c
···518518 int ret;519519520520 client = kzalloc(sizeof *client, GFP_KERNEL);521521- if (!client)521521+ if (!client) {522522+ kfree(data);522523 return -ENOMEM;524524+ }523525524526 client->type = type;525527 client->id = id;···537535 auxdev->dev.release = ljca_auxdev_release;538536539537 ret = auxiliary_device_init(auxdev);540540- if (ret)538538+ if (ret) {539539+ kfree(data);541540 goto err_free;541541+ }542542543543 ljca_auxdev_acpi_bind(adap, auxdev, adr, id);544544···594590 valid_pin[i] = get_unaligned_le32(&desc->bank_desc[i].valid_pins);595591 bitmap_from_arr32(gpio_info->valid_pin_map, valid_pin, gpio_num);596592597597- ret = ljca_new_client_device(adap, LJCA_CLIENT_GPIO, 0, "ljca-gpio",593593+ return ljca_new_client_device(adap, LJCA_CLIENT_GPIO, 0, "ljca-gpio",598594 gpio_info, LJCA_GPIO_ACPI_ADR);599599- if (ret)600600- kfree(gpio_info);601601-602602- return ret;603595}604596605597static int ljca_enumerate_i2c(struct ljca_adapter *adap)···629629 ret = ljca_new_client_device(adap, LJCA_CLIENT_I2C, i,630630 "ljca-i2c", i2c_info,631631 LJCA_I2C1_ACPI_ADR + i);632632- if (ret) {633633- kfree(i2c_info);632632+ if (ret)634633 return ret;635635- }636634 }637635638636 return 0;···667669 ret = ljca_new_client_device(adap, LJCA_CLIENT_SPI, i,668670 "ljca-spi", spi_info,669671 LJCA_SPI1_ACPI_ADR + i);670670- if (ret) {671671- kfree(spi_info);672672+ if (ret)672673 return ret;673673- }674674 }675675676676 return 0;
-7
drivers/usb/phy/phy-generic.c
···262262 return dev_err_probe(dev, PTR_ERR(nop->vbus_draw),263263 "could not get vbus regulator\n");264264265265- nop->vbus_draw = devm_regulator_get_exclusive(dev, "vbus");266266- if (PTR_ERR(nop->vbus_draw) == -ENODEV)267267- nop->vbus_draw = NULL;268268- if (IS_ERR(nop->vbus_draw))269269- return dev_err_probe(dev, PTR_ERR(nop->vbus_draw),270270- "could not get vbus regulator\n");271271-272265 nop->dev = dev;273266 nop->phy.dev = nop->dev;274267 nop->phy.label = "nop-xceiv";
···2323 void *base;2424 struct completion complete;2525 unsigned long flags;2626+#define UCSI_ACPI_SUPPRESS_EVENT 02727+#define UCSI_ACPI_COMMAND_PENDING 12828+#define UCSI_ACPI_ACK_PENDING 22629 guid_t guid;2730 u64 cmd;2828- bool dell_quirk_probed;2929- bool dell_quirk_active;3031};31323233static int ucsi_acpi_dsm(struct ucsi_acpi *ua, int func)···8079 int ret;81808281 if (ack)8383- set_bit(ACK_PENDING, &ua->flags);8282+ set_bit(UCSI_ACPI_ACK_PENDING, &ua->flags);8483 else8585- set_bit(COMMAND_PENDING, &ua->flags);8484+ set_bit(UCSI_ACPI_COMMAND_PENDING, &ua->flags);86858786 ret = ucsi_acpi_async_write(ucsi, offset, val, val_len);8887 if (ret)···93929493out_clear_bit:9594 if (ack)9696- clear_bit(ACK_PENDING, &ua->flags);9595+ clear_bit(UCSI_ACPI_ACK_PENDING, &ua->flags);9796 else9898- clear_bit(COMMAND_PENDING, &ua->flags);9797+ clear_bit(UCSI_ACPI_COMMAND_PENDING, &ua->flags);999810099 return ret;101100}···130129};131130132131/*133133- * Some Dell laptops expect that an ACK command with the134134- * UCSI_ACK_CONNECTOR_CHANGE bit set is followed by a (separate)135135- * ACK command that only has the UCSI_ACK_COMMAND_COMPLETE bit set.136136- * If this is not done events are not delivered to OSPM and137137- * subsequent commands will timeout.132132+ * Some Dell laptops don't like ACK commands with the133133+ * UCSI_ACK_CONNECTOR_CHANGE but not the UCSI_ACK_COMMAND_COMPLETE134134+ * bit set. 
To work around this send a dummy command and bundle the135135+ * UCSI_ACK_CONNECTOR_CHANGE with the UCSI_ACK_COMMAND_COMPLETE136136+ * for the dummy command.138137 */139138static int140139ucsi_dell_sync_write(struct ucsi *ucsi, unsigned int offset,141140 const void *val, size_t val_len)142141{143142 struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);144144- u64 cmd = *(u64 *)val, ack = 0;143143+ u64 cmd = *(u64 *)val;144144+ u64 dummycmd = UCSI_GET_CAPABILITY;145145 int ret;146146147147- if (UCSI_COMMAND(cmd) == UCSI_ACK_CC_CI &&148148- cmd & UCSI_ACK_CONNECTOR_CHANGE)149149- ack = UCSI_ACK_CC_CI | UCSI_ACK_COMMAND_COMPLETE;147147+ if (cmd == (UCSI_ACK_CC_CI | UCSI_ACK_CONNECTOR_CHANGE)) {148148+ cmd |= UCSI_ACK_COMMAND_COMPLETE;150149151151- ret = ucsi_acpi_sync_write(ucsi, offset, val, val_len);152152- if (ret != 0)153153- return ret;154154- if (ack == 0)155155- return ret;150150+ /*151151+ * The UCSI core thinks it is sending a connector change ack152152+ * and will accept new connector change events. 
We don't want153153+ * this to happen for the dummy command as its response will154154+ * still report the very event that the core is trying to clear.155155+ */156156+ set_bit(UCSI_ACPI_SUPPRESS_EVENT, &ua->flags);157157+ ret = ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &dummycmd,158158+ sizeof(dummycmd));159159+ clear_bit(UCSI_ACPI_SUPPRESS_EVENT, &ua->flags);156160157157- if (!ua->dell_quirk_probed) {158158- ua->dell_quirk_probed = true;159159-160160- cmd = UCSI_GET_CAPABILITY;161161- ret = ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &cmd,162162- sizeof(cmd));163163- if (ret == 0)164164- return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL,165165- &ack, sizeof(ack));166166- if (ret != -ETIMEDOUT)161161+ if (ret < 0)167162 return ret;168168-169169- ua->dell_quirk_active = true;170170- dev_err(ua->dev, "Firmware bug: Additional ACK required after ACKing a connector change.\n");171171- dev_err(ua->dev, "Firmware bug: Enabling workaround\n");172163 }173164174174- if (!ua->dell_quirk_active)175175- return ret;176176-177177- return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &ack, sizeof(ack));165165+ return ucsi_acpi_sync_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd));178166}179167180168static const struct ucsi_operations ucsi_dell_ops = {···199209 if (ret)200210 return;201211202202- if (UCSI_CCI_CONNECTOR(cci))212212+ if (UCSI_CCI_CONNECTOR(cci) &&213213+ !test_bit(UCSI_ACPI_SUPPRESS_EVENT, &ua->flags))203214 ucsi_connector_change(ua->ucsi, UCSI_CCI_CONNECTOR(cci));204215205216 if (cci & UCSI_CCI_ACK_COMPLETE && test_bit(ACK_PENDING, &ua->flags))206217 complete(&ua->complete);207218 if (cci & UCSI_CCI_COMMAND_COMPLETE &&208208- test_bit(COMMAND_PENDING, &ua->flags))219219+ test_bit(UCSI_ACPI_COMMAND_PENDING, &ua->flags))209220 complete(&ua->complete);210221}211222
+14
drivers/usb/typec/ucsi/ucsi_glink.c
···255255static void pmic_glink_ucsi_register(struct work_struct *work)256256{257257 struct pmic_glink_ucsi *ucsi = container_of(work, struct pmic_glink_ucsi, register_work);258258+ int orientation;259259+ int i;260260+261261+ for (i = 0; i < PMIC_GLINK_MAX_PORTS; i++) {262262+ if (!ucsi->port_orientation[i])263263+ continue;264264+ orientation = gpiod_get_value(ucsi->port_orientation[i]);265265+266266+ if (orientation >= 0) {267267+ typec_switch_set(ucsi->port_switch[i],268268+ orientation ? TYPEC_ORIENTATION_REVERSE269269+ : TYPEC_ORIENTATION_NORMAL);270270+ }271271+ }258272259273 ucsi_register(ucsi->ucsi);260274}
+3
drivers/video/fbdev/Kconfig
···494494 select FB_CFB_COPYAREA495495 select FB_CFB_FILLRECT496496 select FB_CFB_IMAGEBLIT497497+ select FB_IOMEM_FOPS497498498499config FB_BW2499500 bool "BWtwo support"···515514 depends on (FB = y) && (SPARC && FB_SBUS)516515 select FB_CFB_COPYAREA517516 select FB_CFB_IMAGEBLIT517517+ select FB_IOMEM_FOPS518518 help519519 This is the frame buffer device driver for the CGsix (GX, TurboGX)520520 frame buffer.···525523 depends on FB_SBUS && SPARC64526524 select FB_CFB_COPYAREA527525 select FB_CFB_IMAGEBLIT526526+ select FB_IOMEM_FOPS528527 help529528 This is the frame buffer device driver for the Creator, Creator3D,530529 and Elite3D graphics boards.
···15591559 * needing to allocate extents from the block group.15601560 */15611561 used = btrfs_space_info_used(space_info, true);15621562- if (space_info->total_bytes - block_group->length < used) {15621562+ if (space_info->total_bytes - block_group->length < used &&15631563+ block_group->zone_unusable < block_group->length) {15631564 /*15641565 * Add a reference for the list, compensate for the ref15651566 * drop under the "next" label for the
+13
fs/btrfs/extent_io.c
···43334333 if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))43344334 goto done;4335433543364336+ /*43374337+ * Between the initial test_bit(EXTENT_BUFFER_UPTODATE) and the above43384338+ * test_and_set_bit(EXTENT_BUFFER_READING), someone else could have43394339+ * started and finished reading the same eb. In this case, UPTODATE43404340+ * will now be set, and we shouldn't read it in again.43414341+ */43424342+ if (unlikely(test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))) {43434343+ clear_bit(EXTENT_BUFFER_READING, &eb->bflags);43444344+ smp_mb__after_atomic();43454345+ wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING);43464346+ return 0;43474347+ }43484348+43364349 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);43374350 eb->read_mirror = 0;43384351 check_buffer_tree_ref(eb);
+8-8
fs/btrfs/extent_map.c
···309309 btrfs_warn(fs_info,310310"no extent map found for inode %llu (root %lld) when unpinning extent range [%llu, %llu), generation %llu",311311 btrfs_ino(inode), btrfs_root_id(inode->root),312312- start, len, gen);312312+ start, start + len, gen);313313 ret = -ENOENT;314314 goto out;315315 }···318318 btrfs_warn(fs_info,319319"found extent map for inode %llu (root %lld) with unexpected start offset %llu when unpinning extent range [%llu, %llu), generation %llu",320320 btrfs_ino(inode), btrfs_root_id(inode->root),321321- em->start, start, len, gen);321321+ em->start, start, start + len, gen);322322 ret = -EUCLEAN;323323 goto out;324324 }···340340 em->mod_len = em->len;341341 }342342343343- free_extent_map(em);344343out:345344 write_unlock(&tree->lock);345345+ free_extent_map(em);346346 return ret;347347348348}···629629 */630630 ret = merge_extent_mapping(em_tree, existing,631631 em, start);632632- if (ret) {632632+ if (WARN_ON(ret)) {633633 free_extent_map(em);634634 *em_in = NULL;635635- WARN_ONCE(ret,636636-"extent map merge error existing [%llu, %llu) with em [%llu, %llu) start %llu\n",637637- existing->start, existing->len,638638- orig_start, orig_len, start);635635+ btrfs_warn(fs_info,636636+"extent map merge error existing [%llu, %llu) with em [%llu, %llu) start %llu",637637+ existing->start, extent_map_end(existing),638638+ orig_start, orig_start + orig_len, start);639639 }640640 free_extent_map(existing);641641 }
+11-1
fs/btrfs/scrub.c
···28122812 gen = btrfs_get_last_trans_committed(fs_info);2813281328142814 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {28152815- bytenr = btrfs_sb_offset(i);28152815+ ret = btrfs_sb_log_location(scrub_dev, i, 0, &bytenr);28162816+ if (ret == -ENOENT)28172817+ break;28182818+28192819+ if (ret) {28202820+ spin_lock(&sctx->stat_lock);28212821+ sctx->stat.super_errors++;28222822+ spin_unlock(&sctx->stat_lock);28232823+ continue;28242824+ }28252825+28162826 if (bytenr + BTRFS_SUPER_INFO_SIZE >28172827 scrub_dev->commit_total_bytes)28182828 break;
+22-5
fs/btrfs/volumes.c
···692692 device->bdev = file_bdev(bdev_file);693693 clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);694694695695+ if (device->devt != device->bdev->bd_dev) {696696+ btrfs_warn(NULL,697697+ "device %s maj:min changed from %d:%d to %d:%d",698698+ device->name->str, MAJOR(device->devt),699699+ MINOR(device->devt), MAJOR(device->bdev->bd_dev),700700+ MINOR(device->bdev->bd_dev));701701+702702+ device->devt = device->bdev->bd_dev;703703+ }704704+695705 fs_devices->open_devices++;696706 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&697707 device->devid != BTRFS_DEV_REPLACE_DEVID) {···11841174 struct btrfs_device *device;11851175 struct btrfs_device *latest_dev = NULL;11861176 struct btrfs_device *tmp_device;11771177+ int ret = 0;1187117811881179 list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,11891180 dev_list) {11901190- int ret;11811181+ int ret2;1191118211921192- ret = btrfs_open_one_device(fs_devices, device, flags, holder);11931193- if (ret == 0 &&11831183+ ret2 = btrfs_open_one_device(fs_devices, device, flags, holder);11841184+ if (ret2 == 0 &&11941185 (!latest_dev || device->generation > latest_dev->generation)) {11951186 latest_dev = device;11961196- } else if (ret == -ENODATA) {11871187+ } else if (ret2 == -ENODATA) {11971188 fs_devices->num_devices--;11981189 list_del(&device->dev_list);11991190 btrfs_free_device(device);12001191 }11921192+ if (ret == 0 && ret2 != 0)11931193+ ret = ret2;12011194 }12021202- if (fs_devices->open_devices == 0)11951195+11961196+ if (fs_devices->open_devices == 0) {11971197+ if (ret)11981198+ return ret;12031199 return -EINVAL;12001200+ }1204120112051202 fs_devices->opened = 1;12061203 fs_devices->latest_dev = latest_dev;
+7-7
fs/btrfs/zoned.c
···15741574 if (!map)15751575 return -EINVAL;1576157615771577- cache->physical_map = btrfs_clone_chunk_map(map, GFP_NOFS);15781578- if (!cache->physical_map) {15791579- ret = -ENOMEM;15801580- goto out;15811581- }15771577+ cache->physical_map = map;1582157815831579 zone_info = kcalloc(map->num_stripes, sizeof(*zone_info), GFP_NOFS);15841580 if (!zone_info) {···16861690 }16871691 bitmap_free(active);16881692 kfree(zone_info);16891689- btrfs_free_chunk_map(map);1690169316911694 return ret;16921695}···21702175 struct btrfs_chunk_map *map;21712176 const bool is_metadata = (block_group->flags &21722177 (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM));21782178+ struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;21732179 int ret = 0;21742180 int i;21752181···22462250 btrfs_clear_data_reloc_bg(block_group);22472251 spin_unlock(&block_group->lock);2248225222532253+ down_read(&dev_replace->rwsem);22492254 map = block_group->physical_map;22502255 for (i = 0; i < map->num_stripes; i++) {22512256 struct btrfs_device *device = map->stripes[i].dev;···22632266 zinfo->zone_size >> SECTOR_SHIFT);22642267 memalloc_nofs_restore(nofs_flags);2265226822662266- if (ret)22692269+ if (ret) {22702270+ up_read(&dev_replace->rwsem);22672271 return ret;22722272+ }2268227322692274 if (!(block_group->flags & BTRFS_BLOCK_GROUP_DATA))22702275 zinfo->reserved_active_zones++;22712276 btrfs_dev_clear_active_zone(device, physical);22722277 }22782278+ up_read(&dev_replace->rwsem);2273227922742280 if (!fully_written)22752281 btrfs_dec_block_group_ro(block_group);
-1
fs/erofs/super.c
···430430431431 switch (mode) {432432 case EROFS_MOUNT_DAX_ALWAYS:433433- warnfc(fc, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");434433 set_opt(&ctx->opt, DAX_ALWAYS);435434 clear_opt(&ctx->opt, DAX_NEVER);436435 return true;
···17181718 struct buffer_head *dibh, *bh;17191719 struct gfs2_holder rd_gh;17201720 unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;17211721- u64 lblock = (offset + (1 << bsize_shift) - 1) >> bsize_shift;17211721+ unsigned int bsize = 1 << bsize_shift;17221722+ u64 lblock = (offset + bsize - 1) >> bsize_shift;17221723 __u16 start_list[GFS2_MAX_META_HEIGHT];17231724 __u16 __end_list[GFS2_MAX_META_HEIGHT], *end_list = NULL;17241725 unsigned int start_aligned, end_aligned;···17301729 u64 prev_bnr = 0;17311730 __be64 *start, *end;1732173117331733- if (offset >= maxsize) {17321732+ if (offset + bsize - 1 >= maxsize) {17341733 /*17351734 * The starting point lies beyond the allocated metadata;17361735 * there are no blocks to deallocate.
+25-11
fs/nfsd/nfs4state.c
···38313831 else38323832 cs_slot = &unconf->cl_cs_slot;38333833 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);38343834- if (status) {38353835- if (status == nfserr_replay_cache) {38363836- status = nfsd4_replay_create_session(cr_ses, cs_slot);38373837- goto out_free_conn;38383838- }38343834+ switch (status) {38353835+ case nfs_ok:38363836+ cs_slot->sl_seqid++;38373837+ cr_ses->seqid = cs_slot->sl_seqid;38383838+ break;38393839+ case nfserr_replay_cache:38403840+ status = nfsd4_replay_create_session(cr_ses, cs_slot);38413841+ fallthrough;38423842+ case nfserr_jukebox:38433843+ /* The server MUST NOT cache NFS4ERR_DELAY */38443844+ goto out_free_conn;38453845+ default:38393846 goto out_cache_error;38403847 }38413841- cs_slot->sl_seqid++;38423842- cr_ses->seqid = cs_slot->sl_seqid;3843384838443849 /* RFC 8881 Section 18.36.4 Phase 3: Client ID confirmation. */38453850 if (conf) {···38643859 old = find_confirmed_client_by_name(&unconf->cl_name, nn);38653860 if (old) {38663861 status = mark_client_expired_locked(old);38673867- if (status) {38683868- old = NULL;38693869- goto out_cache_error;38703870- }38623862+ if (status)38633863+ goto out_expired_error;38713864 trace_nfsd_clid_replaced(&old->cl_clientid);38723865 }38733866 move_to_confirmed(unconf);···38973894 expire_client(old);38983895 return status;3899389638973897+out_expired_error:38983898+ old = NULL;38993899+ /*39003900+ * Revert the slot seq_nr change so the server will process39013901+ * the client's resend instead of returning a cached response.39023902+ */39033903+ if (status == nfserr_jukebox) {39043904+ cs_slot->sl_seqid--;39053905+ cr_ses->seqid = cs_slot->sl_seqid;39063906+ goto out_free_conn;39073907+ }39003908out_cache_error:39013909 nfsd4_cache_create_session(cr_ses, cs_slot, status);39023910out_free_conn:
···1212#include "cifs_fs_sb.h"1313#include "cifsproto.h"14141515+/*1616+ * Key for fscache inode. [!] Contents must match comparisons in cifs_find_inode().1717+ */1818+struct cifs_fscache_inode_key {1919+2020+ __le64 uniqueid; /* server inode number */2121+ __le64 createtime; /* creation time on server */2222+ u8 type; /* S_IFMT file type */2323+} __packed;2424+1525static void cifs_fscache_fill_volume_coherency(1626 struct cifs_tcon *tcon,1727 struct cifs_fscache_volume_coherency_data *cd)···10797void cifs_fscache_get_inode_cookie(struct inode *inode)10898{10999 struct cifs_fscache_inode_coherency_data cd;100100+ struct cifs_fscache_inode_key key;110101 struct cifsInodeInfo *cifsi = CIFS_I(inode);111102 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);112103 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);113104105105+ key.uniqueid = cpu_to_le64(cifsi->uniqueid);106106+ key.createtime = cpu_to_le64(cifsi->createtime);107107+ key.type = (inode->i_mode & S_IFMT) >> 12;114108 cifs_fscache_fill_coherency(&cifsi->netfs.inode, &cd);115109116110 cifsi->netfs.cache =117111 fscache_acquire_cookie(tcon->fscache, 0,118118- &cifsi->uniqueid, sizeof(cifsi->uniqueid),112112+ &key, sizeof(key),119113 &cd, sizeof(cd),120114 i_size_read(&cifsi->netfs.inode));121115 if (cifsi->netfs.cache)
+2
fs/smb/client/inode.c
···13511351{13521352 struct cifs_fattr *fattr = opaque;1353135313541354+ /* [!] The compared values must be the same in struct cifs_fscache_inode_key. */13551355+13541356 /* don't match inode with different uniqueid */13551357 if (CIFS_I(inode)->uniqueid != fattr->cf_uniqueid)13561358 return 0;
+3-1
fs/smb/client/trace.h
···375375DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(delete_enter);376376DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(mkdir_enter);377377DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(tdis_enter);378378+DEFINE_SMB3_INF_COMPOUND_ENTER_EVENT(mknod_enter);378379379380DECLARE_EVENT_CLASS(smb3_inf_compound_done_class,380381 TP_PROTO(unsigned int xid,···416415DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(delete_done);417416DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(mkdir_done);418417DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(tdis_done);419419-418418+DEFINE_SMB3_INF_COMPOUND_DONE_EVENT(mknod_done);420419421420DECLARE_EVENT_CLASS(smb3_inf_compound_err_class,422421 TP_PROTO(unsigned int xid,···462461DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(mkdir_err);463462DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(delete_err);464463DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(tdis_err);464464+DEFINE_SMB3_INF_COMPOUND_ERR_EVENT(mknod_err);465465466466/*467467 * For logging SMB3 Status code and Command for responses which return errors
+31-9
fs/xfs/libxfs/xfs_sb.c
···530530 }531531532532 if (!xfs_validate_stripe_geometry(mp, XFS_FSB_TO_B(mp, sbp->sb_unit),533533- XFS_FSB_TO_B(mp, sbp->sb_width), 0, false))533533+ XFS_FSB_TO_B(mp, sbp->sb_width), 0,534534+ xfs_buf_daddr(bp) == XFS_SB_DADDR, false))534535 return -EFSCORRUPTED;535536536537 /*···13241323}1325132413261325/*13271327- * sunit, swidth, sectorsize(optional with 0) should be all in bytes,13281328- * so users won't be confused by values in error messages.13261326+ * sunit, swidth, sectorsize(optional with 0) should be all in bytes, so users13271327+ * won't be confused by values in error messages. This function returns false13281328+ * if the stripe geometry is invalid and the caller is unable to repair the13291329+ * stripe configuration later in the mount process.13291330 */13301331bool13311332xfs_validate_stripe_geometry(···13351332 __s64 sunit,13361333 __s64 swidth,13371334 int sectorsize,13351335+ bool may_repair,13381336 bool silent)13391337{13401338 if (swidth > INT_MAX) {13411339 if (!silent)13421340 xfs_notice(mp,13431341"stripe width (%lld) is too large", swidth);13441344- return false;13421342+ goto check_override;13451343 }1346134413471345 if (sunit > swidth) {13481346 if (!silent)13491347 xfs_notice(mp,13501348"stripe unit (%lld) is larger than the stripe width (%lld)", sunit, swidth);13511351- return false;13491349+ goto check_override;13521350 }1353135113541352 if (sectorsize && (int)sunit % sectorsize) {···13571353 xfs_notice(mp,13581354"stripe unit (%lld) must be a multiple of the sector size (%d)",13591355 sunit, sectorsize);13601360- return false;13561356+ goto check_override;13611357 }1362135813631359 if (sunit && !swidth) {13641360 if (!silent)13651361 xfs_notice(mp,13661362"invalid stripe unit (%lld) and stripe width of 0", sunit);13671367- return false;13631363+ goto check_override;13681364 }1369136513701366 if (!sunit && swidth) {13711367 if (!silent)13721368 xfs_notice(mp,13731369"invalid stripe width (%lld) and stripe unit of 0", 
swidth);13741374- return false;13701370+ goto check_override;13751371 }1376137213771373 if (sunit && (int)swidth % (int)sunit) {···13791375 xfs_notice(mp,13801376"stripe width (%lld) must be a multiple of the stripe unit (%lld)",13811377 swidth, sunit);13821382- return false;13781378+ goto check_override;13831379 }13801380+ return true;13811381+13821382+check_override:13831383+ if (!may_repair)13841384+ return false;13851385+ /*13861386+ * During mount, mp->m_dalign will not be set unless the sunit mount13871387+ * option was set. If it was set, ignore the bad stripe alignment values13881388+ * and allow the validation and overwrite later in the mount process to13891389+ * attempt to overwrite the bad stripe alignment values with the values13901390+ * supplied by mount options.13911391+ */13921392+ if (!mp->m_dalign)13931393+ return false;13941394+ if (!silent)13951395+ xfs_notice(mp,13961396+"Will try to correct with specified mount options sunit (%d) and swidth (%d)",13971397+ BBTOB(mp->m_dalign), BBTOB(mp->m_swidth));13841398 return true;13851399}13861400
···10441044 struct xfs_scrub *sc,10451045 struct xfs_inode *ip)10461046{10471047- if (current->journal_info != NULL) {10481048- ASSERT(current->journal_info == sc->tp);10491049-10471047+ if (sc->tp) {10501048 /*10511049 * If we are in a transaction, we /cannot/ drop the inode10521050 * ourselves, because the VFS will trigger writeback, which
-7
fs/xfs/xfs_aops.c
···503503{504504 struct xfs_writepage_ctx wpc = { };505505506506- /*507507- * Writing back data in a transaction context can result in recursive508508- * transactions. This is bad, so issue a warning and get out of here.509509- */510510- if (WARN_ON_ONCE(current->journal_info))511511- return 0;512512-513506 xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);514507 return iomap_writepages(mapping, wbc, &wpc.ctx, &xfs_writeback_ops);515508}
+5-3
fs/xfs/xfs_icache.c
···20392039 * - Memory shrinkers queued the inactivation worker and it hasn't finished.20402040 * - The queue depth exceeds the maximum allowable percpu backlog.20412041 *20422042- * Note: If the current thread is running a transaction, we don't ever want to20432043- * wait for other transactions because that could introduce a deadlock.20422042+ * Note: If we are in a NOFS context here (e.g. current thread is running a20432043+ * transaction) the we don't want to block here as inodegc progress may require20442044+ * filesystem resources we hold to make progress and that could result in a20452045+ * deadlock. Hence we skip out of here if we are in a scoped NOFS context.20442046 */20452047static inline bool20462048xfs_inodegc_want_flush_work(···20502048 unsigned int items,20512049 unsigned int shrinker_hits)20522050{20532053- if (current->journal_info)20512051+ if (current->flags & PF_MEMALLOC_NOFS)20542052 return false;2055205320562054 if (shrinker_hits > 0)
···6767 * later.6868 * IRQF_NO_DEBUG - Exclude from runnaway detection for IPI and similar handlers,6969 * depends on IRQF_PERCPU.7070+ * IRQF_COND_ONESHOT - Agree to do IRQF_ONESHOT if already set for a shared7171+ * interrupt.7072 */7173#define IRQF_SHARED 0x000000807274#define IRQF_PROBE_SHARED 0x00000100···8482#define IRQF_COND_SUSPEND 0x000400008583#define IRQF_NO_AUTOEN 0x000800008684#define IRQF_NO_DEBUG 0x001000008585+#define IRQF_COND_ONESHOT 0x0020000087868887#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)8988
···11111212#include <linux/types.h>13131414-/* 15 pointers + header align the folio_batch structure to a power of two */1515-#define PAGEVEC_SIZE 151414+/* 31 pointers + header align the folio_batch structure to a power of two */1515+#define PAGEVEC_SIZE 3116161717struct folio;1818
+1-6
include/linux/skbuff.h
···753753 * @list: queue head754754 * @ll_node: anchor in an llist (eg socket defer_list)755755 * @sk: Socket we are owned by756756- * @ip_defrag_offset: (aka @sk) alternate use of @sk, used in757757- * fragmentation management758756 * @dev: Device we arrived on/are leaving by759757 * @dev_scratch: (aka @dev) alternate use of @dev when @dev would be %NULL760758 * @cb: Control buffer. Free for use by every layer. Put private vars here···873875 struct llist_node ll_node;874876 };875877876876- union {877877- struct sock *sk;878878- int ip_defrag_offset;879879- };878878+ struct sock *sk;880879881880 union {882881 ktime_t tstamp;
+2
include/net/cfg80211.h
···49914991 * set this flag to update channels on beacon hints.49924992 * @WIPHY_FLAG_SUPPORTS_NSTR_NONPRIMARY: support connection to non-primary link49934993 * of an NSTR mobile AP MLD.49944994+ * @WIPHY_FLAG_DISABLE_WEXT: disable wireless extensions for this device49944995 */49954996enum wiphy_flags {49964997 WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK = BIT(0),···50035002 WIPHY_FLAG_4ADDR_STATION = BIT(6),50045003 WIPHY_FLAG_CONTROL_PORT_PROTOCOL = BIT(7),50055004 WIPHY_FLAG_IBSS_RSN = BIT(8),50055005+ WIPHY_FLAG_DISABLE_WEXT = BIT(9),50065006 WIPHY_FLAG_MESH_AUTH = BIT(10),50075007 WIPHY_FLAG_SUPPORTS_EXT_KCK_32 = BIT(11),50085008 WIPHY_FLAG_SUPPORTS_NSTR_NONPRIMARY = BIT(12),
···328328 * @op_runtime_config: called to config Operation and runtime regs Pointers329329 * @get_outstanding_cqs: called to get outstanding completion queues330330 * @config_esi: called to config Event Specific Interrupt331331+ * @config_scsi_dev: called to configure SCSI device parameters331332 */332333struct ufs_hba_variant_ops {333334 const char *name;
+1-1
init/initramfs.c
···682682683683 printk(KERN_INFO "rootfs image is not initramfs (%s); looks like an initrd\n",684684 err);685685- file = filp_open("/initrd.image", O_WRONLY | O_CREAT, 0700);685685+ file = filp_open("/initrd.image", O_WRONLY|O_CREAT|O_LARGEFILE, 0700);686686 if (IS_ERR(file))687687 return;688688
···38383939/* number of bytes addressable by LDX/STX insn with 16-bit 'off' field */4040#define GUARD_SZ (1ull << sizeof(((struct bpf_insn *)0)->off) * 8)4141-#define KERN_VM_SZ ((1ull << 32) + GUARD_SZ)4141+#define KERN_VM_SZ (SZ_4G + GUARD_SZ)42424343struct bpf_arena {4444 struct bpf_map map;···110110 return ERR_PTR(-EINVAL);111111112112 vm_range = (u64)attr->max_entries * PAGE_SIZE;113113- if (vm_range > (1ull << 32))113113+ if (vm_range > SZ_4G)114114 return ERR_PTR(-E2BIG);115115116116 if ((attr->map_extra >> 32) != ((attr->map_extra + vm_range - 1) >> 32))···301301302302 if (pgoff)303303 return -EINVAL;304304- if (len > (1ull << 32))304304+ if (len > SZ_4G)305305 return -E2BIG;306306307307 /* if user_vm_start was specified at arena creation time */···322322 if (WARN_ON_ONCE(arena->user_vm_start))323323 /* checks at map creation time should prevent this */324324 return -EFAULT;325325- return round_up(ret, 1ull << 32);325325+ return round_up(ret, SZ_4G);326326}327327328328static int arena_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)···346346 return -EBUSY;347347348348 /* Earlier checks should prevent this */349349- if (WARN_ON_ONCE(vma->vm_end - vma->vm_start > (1ull << 32) || vma->vm_pgoff))349349+ if (WARN_ON_ONCE(vma->vm_end - vma->vm_start > SZ_4G || vma->vm_pgoff))350350 return -EFAULT;351351352352 if (remember_vma(arena, vma))···420420 if (uaddr & ~PAGE_MASK)421421 return 0;422422 pgoff = compute_pgoff(arena, uaddr);423423- if (pgoff + page_cnt > page_cnt_max)423423+ if (pgoff > page_cnt_max - page_cnt)424424 /* requested address will be outside of user VMA */425425 return 0;426426 }···447447 goto out;448448449449 uaddr32 = (u32)(arena->user_vm_start + pgoff * PAGE_SIZE);450450- /* Earlier checks make sure that uaddr32 + page_cnt * PAGE_SIZE will not overflow 32-bit */450450+ /* Earlier checks made sure that uaddr32 + page_cnt * PAGE_SIZE - 1451451+ * will not overflow 32-bit. 
Lower 32-bit need to represent452452+ * contiguous user address range.453453+ * Map these pages at kern_vm_start base.454454+ * kern_vm_start + uaddr32 + page_cnt * PAGE_SIZE - 1 can overflow455455+ * lower 32-bit and it's ok.456456+ */451457 ret = vm_area_map_pages(arena->kern_vm, kern_vm_start + uaddr32,452458 kern_vm_start + uaddr32 + page_cnt * PAGE_SIZE, pages);453459 if (ret) {···516510 if (!page)517511 continue;518512 if (page_cnt == 1 && page_mapped(page)) /* mapped by some user process */513513+ /* Optimization for the common case of page_cnt==1:514514+ * If page wasn't mapped into some user vma there515515+ * is no need to call zap_pages which is slow. When516516+ * page_cnt is big it's faster to do the batched zap.517517+ */519518 zap_pages(arena, full_uaddr, 1);520519 vm_area_unmap_pages(arena->kern_vm, kaddr, kaddr + PAGE_SIZE);521520 __free_page(page);
+13
kernel/bpf/bloom_filter.c
···8080 return -EOPNOTSUPP;8181}82828383+/* Called from syscall */8484+static int bloom_map_alloc_check(union bpf_attr *attr)8585+{8686+ if (attr->value_size > KMALLOC_MAX_SIZE)8787+ /* if value_size is bigger, the user space won't be able to8888+ * access the elements.8989+ */9090+ return -E2BIG;9191+9292+ return 0;9393+}9494+8395static struct bpf_map *bloom_map_alloc(union bpf_attr *attr)8496{8597 u32 bitset_bytes, bitset_mask, nr_hash_funcs, nr_bits;···203191BTF_ID_LIST_SINGLE(bpf_bloom_map_btf_ids, struct, bpf_bloom_filter)204192const struct bpf_map_ops bloom_filter_map_ops = {205193 .map_meta_equal = bpf_map_meta_equal,194194+ .map_alloc_check = bloom_map_alloc_check,206195 .map_alloc = bloom_map_alloc,207196 .map_free = bloom_map_free,208197 .map_get_next_key = bloom_map_get_next_key,
···56825682 return reg->type == PTR_TO_FLOW_KEYS;56835683}5684568456855685+static bool is_arena_reg(struct bpf_verifier_env *env, int regno)56865686+{56875687+ const struct bpf_reg_state *reg = reg_state(env, regno);56885688+56895689+ return reg->type == PTR_TO_ARENA;56905690+}56915691+56855692static u32 *reg2btf_ids[__BPF_REG_TYPE_MAX] = {56865693#ifdef CONFIG_NET56875694 [PTR_TO_SOCKET] = &btf_sock_ids[BTF_SOCK_TYPE_SOCK],···67016694 err = check_stack_slot_within_bounds(env, min_off, state, type);67026695 if (!err && max_off > 0)67036696 err = -EINVAL; /* out of stack access into non-negative offsets */66976697+ if (!err && access_size < 0)66986698+ /* access_size should not be negative (or overflow an int); others checks66996699+ * along the way should have prevented such an access.67006700+ */67016701+ err = -EFAULT; /* invalid negative access size; integer overflow? */6704670267056703 if (err) {67066704 if (tnum_is_const(reg->var_off)) {···70317019 if (is_ctx_reg(env, insn->dst_reg) ||70327020 is_pkt_reg(env, insn->dst_reg) ||70337021 is_flow_key_reg(env, insn->dst_reg) ||70347034- is_sk_reg(env, insn->dst_reg)) {70227022+ is_sk_reg(env, insn->dst_reg) ||70237023+ is_arena_reg(env, insn->dst_reg)) {70357024 verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n",70367025 insn->dst_reg,70377026 reg_type_str(env, reg_state(env, insn->dst_reg)->type));···1402714014 verbose(env, "addr_space_cast insn can only convert between address space 1 and 0\n");1402814015 return -EINVAL;1402914016 }1401714017+ if (!env->prog->aux->arena) {1401814018+ verbose(env, "addr_space_cast insn can only be used in a program that has an associated arena\n");1401914019+ return -EINVAL;1402014020+ }1403014021 } else {1403114022 if ((insn->off != 0 && insn->off != 8 && insn->off != 16 &&1403214023 insn->off != 32) || insn->imm) {···1406314046 if (insn->imm) {1406414047 /* off == BPF_ADDR_SPACE_CAST */1406514048 mark_reg_unknown(env, regs, insn->dst_reg);1406614066- if (insn->imm == 
1) /* cast from as(1) to as(0) */1404914049+ if (insn->imm == 1) { /* cast from as(1) to as(0) */1406714050 dst_reg->type = PTR_TO_ARENA;1405114051+ /* PTR_TO_ARENA is 32-bit */1405214052+ dst_reg->subreg_def = env->insn_idx + 1;1405314053+ }1406814054 } else if (insn->off == 0) {1406914055 /* case: R1 = R21407014056 * copy register state to dest reg···1962119601 (((struct bpf_map *)env->prog->aux->arena)->map_flags & BPF_F_NO_USER_CONV)) {1962219602 /* convert to 32-bit mov that clears upper 32-bit */1962319603 insn->code = BPF_ALU | BPF_MOV | BPF_X;1962419624- /* clear off, so it's a normal 'wX = wY' from JIT pov */1960419604+ /* clear off and imm, so it's a normal 'wX = wY' from JIT pov */1962519605 insn->off = 0;1960619606+ insn->imm = 0;1962619607 } /* cast from as(0) to as(1) should be handled by JIT */1962719608 goto next_insn;1962819609 }
···16431643 }1644164416451645 if (!((old->flags & new->flags) & IRQF_SHARED) ||16461646- (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||16471647- ((old->flags ^ new->flags) & IRQF_ONESHOT))16461646+ (oldtype != (new->flags & IRQF_TRIGGER_MASK)))16471647+ goto mismatch;16481648+16491649+ if ((old->flags & IRQF_ONESHOT) &&16501650+ (new->flags & IRQF_COND_ONESHOT))16511651+ new->flags |= IRQF_ONESHOT;16521652+ else if ((old->flags ^ new->flags) & IRQF_ONESHOT)16481653 goto mismatch;1649165416501655 /* All handlers must agree on per-cpuness */
+5
kernel/module/Kconfig
···236236 possible to load a signed module containing the algorithm to check237237 the signature on that module.238238239239+config MODULE_SIG_SHA1240240+ bool "Sign modules with SHA-1"241241+ select CRYPTO_SHA1242242+239243config MODULE_SIG_SHA256240244 bool "Sign modules with SHA-256"241245 select CRYPTO_SHA256···269265config MODULE_SIG_HASH270266 string271267 depends on MODULE_SIG || IMA_APPRAISE_MODSIG268268+ default "sha1" if MODULE_SIG_SHA1272269 default "sha256" if MODULE_SIG_SHA256273270 default "sha384" if MODULE_SIG_SHA384274271 default "sha512" if MODULE_SIG_SHA512
+6
kernel/printk/printk.c
···20092009 */20102010 mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_);2011201120122012+ /*20132013+ * Update @console_may_schedule for trylock because the previous20142014+ * owner may have been schedulable.20152015+ */20162016+ console_may_schedule = 0;20172017+20122018 return 1;20132019}20142020
+5-2
kernel/sys.c
···24082408 if (bits & PR_MDWE_NO_INHERIT && !(bits & PR_MDWE_REFUSE_EXEC_GAIN))24092409 return -EINVAL;2410241024112411- /* PARISC cannot allow mdwe as it needs writable stacks */24122412- if (IS_ENABLED(CONFIG_PARISC))24112411+ /*24122412+ * EOPNOTSUPP might be more appropriate here in principle, but24132413+ * existing userspace depends on EINVAL specifically.24142414+ */24152415+ if (!arch_memory_deny_write_exec_supported())24132416 return -EINVAL;2414241724152418 current_bits = get_current_mdwe();
···41974197 /* shmem file - in swap cache */41984198 swp_entry_t swp = radix_to_swp_entry(folio);4199419942004200+ /* swapin error results in poisoned entry */42014201+ if (non_swap_entry(swp))42024202+ goto resched;42034203+42044204+ /*42054205+ * Getting a swap entry from the shmem42064206+ * inode means we beat42074207+ * shmem_unuse(). rcu_read_lock()42084208+ * ensures swapoff waits for us before42094209+ * freeing the swapper space. However,42104210+ * we can race with swapping and42114211+ * invalidation, so there might not be42124212+ * a shadow in the swapcache (yet).42134213+ */42004214 shadow = get_shadow_from_swap_cache(swp);42154215+ if (!shadow)42164216+ goto resched;42014217 }42024218#endif42034219 if (workingset_test_recent(shadow, true, &workingset))
+8-6
mm/gup.c
···16531653 if (vma->vm_flags & VM_LOCKONFAULT)16541654 return nr_pages;1655165516561656+ /* ... similarly, we've never faulted in PROT_NONE pages */16571657+ if (!vma_is_accessible(vma))16581658+ return -EFAULT;16591659+16561660 gup_flags = FOLL_TOUCH;16571661 /*16581662 * We want to touch writable mappings with a write fault in order16591663 * to break COW, except for shared mappings because these don't COW16601664 * and we would not want to dirty them for nothing.16651665+ *16661666+ * Otherwise, do a read fault, and use FOLL_FORCE in case it's not16671667+ * readable (ie write-only or executable).16611668 */16621669 if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)16631670 gup_flags |= FOLL_WRITE;16641664-16651665- /*16661666- * We want mlock to succeed for regions that have any permissions16671667- * other than PROT_NONE.16681668- */16691669- if (vma_is_accessible(vma))16711671+ else16701672 gup_flags |= FOLL_FORCE;1671167316721674 if (locked)
···10801080 mutex_lock(&acomp_ctx->mutex);1081108110821082 src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);10831083- if (acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) {10831083+ /*10841084+ * If zpool_map_handle is atomic, we cannot reliably utilize its mapped buffer10851085+ * to do crypto_acomp_decompress() which might sleep. In such cases, we must10861086+ * resort to copying the buffer to a temporary one.10871087+ * Meanwhile, zpool_map_handle() might return a non-linearly mapped buffer,10881088+ * such as a kmap address of high memory or even ever a vmap address.10891089+ * However, sg_init_one is only equipped to handle linearly mapped low memory.10901090+ * In such cases, we also must copy the buffer to a temporary and lowmem one.10911091+ */10921092+ if ((acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) ||10931093+ !virt_addr_valid(src)) {10841094 memcpy(acomp_ctx->buffer, src, entry->length);10851095 src = acomp_ctx->buffer;10861096 zpool_unmap_handle(zpool, entry->handle);···11041094 BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);11051095 mutex_unlock(&acomp_ctx->mutex);1106109611071107- if (!acomp_ctx->is_sleepable || zpool_can_sleep_mapped(zpool))10971097+ if (src != acomp_ctx->buffer)11081098 zpool_unmap_handle(zpool, entry->handle);11091099}11101100···13211311 unsigned long nr_backing, nr_stored, nr_freeable, nr_protected;1322131213231313 if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))13141314+ return 0;13151315+13161316+ /*13171317+ * The shrinker resumes swap writeback, which will enter block13181318+ * and may enter fs. 
XXX: Harmonize with vmscan.c __GFP_FS13191319+ * rules (may_enter_fs()), which apply on a per-folio basis.13201320+ */13211321+ if (!gfp_has_io_fs(sc->gfp_mask))13241322 return 0;1325132313261324#ifdef CONFIG_MEMCG_KMEM···16361618 swp_entry_t swp = folio->swap;16371619 pgoff_t offset = swp_offset(swp);16381620 struct page *page = &folio->page;16211621+ bool swapcache = folio_test_swapcache(folio);16391622 struct zswap_tree *tree = swap_zswap_tree(swp);16401623 struct zswap_entry *entry;16411624 u8 *dst;···16491630 spin_unlock(&tree->lock);16501631 return false;16511632 }16521652- zswap_rb_erase(&tree->rbroot, entry);16331633+ /*16341634+ * When reading into the swapcache, invalidate our entry. The16351635+ * swapcache can be the authoritative owner of the page and16361636+ * its mappings, and the pressure that results from having two16371637+ * in-memory copies outweighs any benefits of caching the16381638+ * compression work.16391639+ *16401640+ * (Most swapins go through the swapcache. The notable16411641+ * exception is the singleton fault on SWP_SYNCHRONOUS_IO16421642+ * files, which reads into a private page and may free it if16431643+ * the fault fails. We remain the primary owner of the entry.)16441644+ */16451645+ if (swapcache)16461646+ zswap_rb_erase(&tree->rbroot, entry);16531647 spin_unlock(&tree->lock);1654164816551649 if (entry->length)···16771645 if (entry->objcg)16781646 count_objcg_event(entry->objcg, ZSWPIN);1679164716801680- zswap_entry_free(entry);16811681-16821682- folio_mark_dirty(folio);16481648+ if (swapcache) {16491649+ zswap_entry_free(entry);16501650+ folio_mark_dirty(folio);16511651+ }1683165216841653 return true;16851654}
+2-2
net/core/sock.c
···482482 unsigned long flags;483483 struct sk_buff_head *list = &sk->sk_receive_queue;484484485485- if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {485485+ if (atomic_read(&sk->sk_rmem_alloc) >= READ_ONCE(sk->sk_rcvbuf)) {486486 atomic_inc(&sk->sk_drops);487487 trace_sock_rcvqueue_full(sk, skb);488488 return -ENOMEM;···552552553553 skb->dev = NULL;554554555555- if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {555555+ if (sk_rcvqueues_full(sk, READ_ONCE(sk->sk_rcvbuf))) {556556 atomic_inc(&sk->sk_drops);557557 goto discard_and_relse;558558 }
···329329config IP_NF_ARPFILTER330330 tristate "arptables-legacy packet filtering support"331331 select IP_NF_ARPTABLES332332+ select NETFILTER_FAMILY_ARP332333 depends on NETFILTER_XTABLES333334 help334335 ARP packet filtering defines a table `filter', which has a series of
+3-1
net/ipv4/nexthop.c
···768768 struct net *net = nh->net;769769 int err;770770771771- if (nexthop_notifiers_is_empty(net))771771+ if (nexthop_notifiers_is_empty(net)) {772772+ *hw_stats_used = false;772773 return 0;774774+ }773775774776 err = nh_notifier_grp_hw_stats_init(&info, nh);775777 if (err)
+2
net/ipv4/tcp.c
···29312931 lock_sock(sk);29322932 __tcp_close(sk, timeout);29332933 release_sock(sk);29342934+ if (!sk->sk_net_refcnt)29352935+ inet_csk_clear_xmit_timers_sync(sk);29342936 sock_put(sk);29352937}29362938EXPORT_SYMBOL(tcp_close);
+3-2
net/ipv6/addrconf.c
···5416541654175417 err = 0;54185418 if (fillargs.ifindex) {54195419- err = -ENODEV;54205419 dev = dev_get_by_index_rcu(tgt_net, fillargs.ifindex);54215421- if (!dev)54205420+ if (!dev) {54215421+ err = -ENODEV;54225422 goto done;54235423+ }54235424 idev = __in6_dev_get(dev);54245425 if (idev)54255426 err = in6_dump_addrs(idev, skb, cb,
···131131};132132133133/**134134- * enum ieee80211_corrupt_data_flags - BSS data corruption flags134134+ * enum ieee80211_bss_corrupt_data_flags - BSS data corruption flags135135 * @IEEE80211_BSS_CORRUPT_BEACON: last beacon frame received was corrupted136136 * @IEEE80211_BSS_CORRUPT_PROBE_RESP: last probe response received was corrupted137137 *···144144};145145146146/**147147- * enum ieee80211_valid_data_flags - BSS valid data flags147147+ * enum ieee80211_bss_valid_data_flags - BSS valid data flags148148 * @IEEE80211_BSS_VALID_WMM: WMM/UAPSD data was gathered from non-corrupt IE149149 * @IEEE80211_BSS_VALID_RATES: Supported rates were gathered from non-corrupt IE150150 * @IEEE80211_BSS_VALID_ERP: ERP flag was gathered from non-corrupt IE
+12-3
net/mac80211/mlme.c
···58745874 }5875587558765876 if (sdata->vif.active_links != active_links) {58775877+ /* usable links are affected when active_links are changed,58785878+ * so notify the driver about the status change58795879+ */58805880+ changed |= BSS_CHANGED_MLD_VALID_LINKS;58815881+ active_links &= sdata->vif.active_links;58825882+ if (!active_links)58835883+ active_links =58845884+ BIT(__ffs(sdata->vif.valid_links &58855885+ ~dormant_links));58775886 ret = ieee80211_set_active_links(&sdata->vif, active_links);58785887 if (ret) {58795888 sdata_info(sdata, "Failed to set TTLM active links\n");···58975888 goto out;58985889 }5899589059005900- changed |= BSS_CHANGED_MLD_VALID_LINKS;59015891 sdata->vif.suspended_links = suspended_links;59025892 if (sdata->vif.suspended_links)59035893 changed |= BSS_CHANGED_MLD_TTLM;···76607652 sdata_info(sdata,76617653 "failed to insert STA entry for the AP (error %d)\n",76627654 err);76637663- goto out_err;76557655+ goto out_release_chan;76647656 }76657657 } else76667658 WARN_ON_ONCE(!ether_addr_equal(link->u.mgd.bssid, cbss->bssid));···7671766376727664 return 0;7673766576667666+out_release_chan:76677667+ ieee80211_link_release_channel(link);76747668out_err:76757675- ieee80211_link_release_channel(&sdata->deflink);76767669 ieee80211_vif_set_links(sdata, 0, 0);76777670 return err;76787671}
+42-8
net/netfilter/nf_tables_api.c
···12001200 __NFT_TABLE_F_WAS_AWAKEN | \12011201 __NFT_TABLE_F_WAS_ORPHAN)1202120212031203+static bool nft_table_pending_update(const struct nft_ctx *ctx)12041204+{12051205+ struct nftables_pernet *nft_net = nft_pernet(ctx->net);12061206+ struct nft_trans *trans;12071207+12081208+ if (ctx->table->flags & __NFT_TABLE_F_UPDATE)12091209+ return true;12101210+12111211+ list_for_each_entry(trans, &nft_net->commit_list, list) {12121212+ if ((trans->msg_type == NFT_MSG_NEWCHAIN ||12131213+ trans->msg_type == NFT_MSG_DELCHAIN) &&12141214+ trans->ctx.table == ctx->table &&12151215+ nft_trans_chain_update(trans))12161216+ return true;12171217+ }12181218+12191219+ return false;12201220+}12211221+12031222static int nf_tables_updtable(struct nft_ctx *ctx)12041223{12051224 struct nft_trans *trans;···12451226 return -EOPNOTSUPP;1246122712471228 /* No dormant off/on/off/on games in single transaction */12481248- if (ctx->table->flags & __NFT_TABLE_F_UPDATE)12291229+ if (nft_table_pending_update(ctx))12491230 return -EINVAL;1250123112511232 trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE,···26502631 }26512632 }2652263326342634+ if (table->flags & __NFT_TABLE_F_UPDATE &&26352635+ !list_empty(&hook.list)) {26362636+ NL_SET_BAD_ATTR(extack, attr);26372637+ err = -EOPNOTSUPP;26382638+ goto err_hooks;26392639+ }26402640+26532641 if (!(table->flags & NFT_TABLE_F_DORMANT) &&26542642 nft_is_base_chain(chain) &&26552643 !list_empty(&hook.list)) {···28862860 struct nft_trans *trans;28872861 int err;2888286228632863+ if (ctx->table->flags & __NFT_TABLE_F_UPDATE)28642864+ return -EOPNOTSUPP;28652865+28892866 err = nft_chain_parse_hook(ctx->net, basechain, nla, &chain_hook,28902867 ctx->family, chain->flags, extack);28912868 if (err < 0)···29732944 nft_ctx_init(&ctx, net, skb, info->nlh, family, table, chain, nla);2974294529752946 if (nla[NFTA_CHAIN_HOOK]) {29762976- if (chain->flags & NFT_CHAIN_HW_OFFLOAD)29472947+ if (NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_DESTROYCHAIN ||29482948+ 
chain->flags & NFT_CHAIN_HW_OFFLOAD)29772949 return -EOPNOTSUPP;2978295029792951 if (nft_is_base_chain(chain)) {···1021210182 if (nft_trans_chain_update(trans)) {1021310183 nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN,1021410184 &nft_trans_chain_hooks(trans));1021510215- nft_netdev_unregister_hooks(net,1021610216- &nft_trans_chain_hooks(trans),1021710217- true);1018510185+ if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT)) {1018610186+ nft_netdev_unregister_hooks(net,1018710187+ &nft_trans_chain_hooks(trans),1018810188+ true);1018910189+ }1021810190 } else {1021910191 nft_chain_del(trans->ctx.chain);1022010192 nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN,···1049210460 break;1049310461 case NFT_MSG_NEWCHAIN:1049410462 if (nft_trans_chain_update(trans)) {1049510495- nft_netdev_unregister_hooks(net,1049610496- &nft_trans_chain_hooks(trans),1049710497- true);1046310463+ if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT)) {1046410464+ nft_netdev_unregister_hooks(net,1046510465+ &nft_trans_chain_hooks(trans),1046610466+ true);1046710467+ }1049810468 free_percpu(nft_trans_chain_stats(trans));1049910469 kfree(nft_trans_chain_name(trans));1050010470 nft_trans_destroy(trans);
+5
net/nfc/nci/core.c
···15161516 nfc_send_to_raw_sock(ndev->nfc_dev, skb,15171517 RAW_PAYLOAD_NCI, NFC_DIRECTION_RX);1518151815191519+ if (!nci_plen(skb->data)) {15201520+ kfree_skb(skb);15211521+ break;15221522+ }15231523+15191524 /* Process frame */15201525 switch (nci_mt(skb->data)) {15211526 case NCI_MT_RSP_PKT:
+8-6
net/sunrpc/auth_gss/gss_krb5_crypto.c
···921921 * Caller provides the truncation length of the output token (h) in922922 * cksumout.len.923923 *924924- * Note that for RPCSEC, the "initial cipher state" is always all zeroes.925925- *926924 * Return values:927925 * %GSS_S_COMPLETE: Digest computed, @cksumout filled in928926 * %GSS_S_FAILURE: Call failed···931933 int body_offset, struct xdr_netobj *cksumout)932934{933935 unsigned int ivsize = crypto_sync_skcipher_ivsize(cipher);934934- static const u8 iv[GSS_KRB5_MAX_BLOCKSIZE];935936 struct ahash_request *req;936937 struct scatterlist sg[1];938938+ u8 *iv, *checksumdata;937939 int err = -ENOMEM;938938- u8 *checksumdata;939940940941 checksumdata = kmalloc(crypto_ahash_digestsize(tfm), GFP_KERNEL);941942 if (!checksumdata)942943 return GSS_S_FAILURE;944944+ /* For RPCSEC, the "initial cipher state" is always all zeroes. */945945+ iv = kzalloc(ivsize, GFP_KERNEL);946946+ if (!iv)947947+ goto out_free_mem;943948944949 req = ahash_request_alloc(tfm, GFP_KERNEL);945950 if (!req)946946- goto out_free_cksumdata;951951+ goto out_free_mem;947952 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);948953 err = crypto_ahash_init(req);949954 if (err)···970969971970out_free_ahash:972971 ahash_request_free(req);973973-out_free_cksumdata:972972+out_free_mem:973973+ kfree(iv);974974 kfree_sensitive(checksumdata);975975 return err ? GSS_S_FAILURE : GSS_S_COMPLETE;976976}
+5-2
net/tls/tls_sw.c
···19761976 if (unlikely(flags & MSG_ERRQUEUE))19771977 return sock_recv_errqueue(sk, msg, len, SOL_IP, IP_RECVERR);1978197819791979- psock = sk_psock_get(sk);19801979 err = tls_rx_reader_lock(sk, ctx, flags & MSG_DONTWAIT);19811980 if (err < 0)19821981 return err;19821982+ psock = sk_psock_get(sk);19831983 bpf_strp_enabled = sk_psock_strp_enabled(psock);1984198419851985 /* If crypto failed the connection is broken */···21522152 }2153215321542154 /* Drain records from the rx_list & copy if required */21552155- if (is_peek || is_kvec)21552155+ if (is_peek)21562156 err = process_rx_list(ctx, msg, &control, copied + peeked,21572157 decrypted - peeked, is_peek, NULL);21582158 else21592159 err = process_rx_list(ctx, msg, &control, 0,21602160 async_copy_bytes, is_peek, NULL);21612161+21622162+ /* we could have copied less than we wanted, and possibly nothing */21632163+ decrypted += max(err, 0) - async_copy_bytes;21612164 }2162216521632166 copied += decrypted;
···282282 }283283}284284285285-void menu_finalize(struct menu *parent)285285+static void _menu_finalize(struct menu *parent, bool inside_choice)286286{287287 struct menu *menu, *last_menu;288288 struct symbol *sym;···296296 * and propagate parent dependencies before moving on.297297 */298298299299- if (sym && sym_is_choice(sym)) {299299+ bool is_choice = false;300300+301301+ if (sym && sym_is_choice(sym))302302+ is_choice = true;303303+304304+ if (is_choice) {300305 if (sym->type == S_UNKNOWN) {301306 /* find the first choice value to find out choice type */302307 current_entry = parent;···399394 }400395 }401396402402- if (sym && sym_is_choice(sym))397397+ if (is_choice)403398 expr_free(parentdep);404399405400 /*···407402 * moving on408403 */409404 for (menu = parent->list; menu; menu = menu->next)410410- menu_finalize(menu);411411- } else if (sym) {405405+ _menu_finalize(menu, is_choice);406406+ } else if (!inside_choice && sym) {412407 /*413408 * Automatic submenu creation. If sym is a symbol and A, B, C,414409 * ... are consecutive items (symbols, menus, ifs, etc.) that···468463 /* Superset, put in submenu */469464 expr_free(dep2);470465 next:471471- menu_finalize(menu);466466+ _menu_finalize(menu, false);472467 menu->parent = parent;473468 last_menu = menu;474469 }···585580 expr_alloc_and(parent->prompt->visible.expr,586581 expr_alloc_symbol(&symbol_mod)));587582 }583583+}584584+585585+void menu_finalize(void)586586+{587587+ _menu_finalize(&rootmenu, false);588588}589589590590bool menu_has_prompt(struct menu *menu)
+1-1
scripts/kconfig/parser.y
···515515 menu_add_prompt(P_MENU, "Main menu", NULL);516516 }517517518518- menu_finalize(&rootmenu);518518+ menu_finalize();519519520520 menu = &rootmenu;521521 while (menu) {
+5-2
scripts/mod/modpost.c
···1007100710081008static Elf_Sym *find_tosym(struct elf_info *elf, Elf_Addr addr, Elf_Sym *sym)10091009{10101010+ Elf_Sym *new_sym;10111011+10101012 /* If the supplied symbol has a valid name, return it */10111013 if (is_valid_name(elf, sym))10121014 return sym;···10171015 * Strive to find a better symbol name, but the resulting name may not10181016 * match the symbol referenced in the original code.10191017 */10201020- return symsearch_find_nearest(elf, addr, get_secindex(elf, sym),10211021- true, 20);10181018+ new_sym = symsearch_find_nearest(elf, addr, get_secindex(elf, sym),10191019+ true, 20);10201020+ return new_sym ? new_sym : sym;10221021}1023102210241023static bool is_executable_section(struct elf_info *elf, unsigned int secndx)
···8989 struct snd_kcontrol *dsp_prog_ctl;9090 struct snd_kcontrol *dsp_conf_ctl;9191 struct snd_kcontrol *prof_ctl;9292- struct snd_kcontrol *snd_ctls[3];9292+ struct snd_kcontrol *snd_ctls[2];9393};94949595static int tas2781_get_i2c_res(struct acpi_resource *ares, void *data)···161161 pm_runtime_put_autosuspend(dev);162162 break;163163 default:164164- dev_dbg(tas_hda->dev, "Playback action not supported: %d\n",165165- action);166164 break;167165 }168166}···183185{184186 struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);185187188188+ mutex_lock(&tas_priv->codec_lock);189189+186190 ucontrol->value.integer.value[0] = tas_priv->rcabin.profile_cfg_id;191191+192192+ dev_dbg(tas_priv->dev, "%s: kcontrol %s: %d\n",193193+ __func__, kcontrol->id.name, tas_priv->rcabin.profile_cfg_id);194194+195195+ mutex_unlock(&tas_priv->codec_lock);187196188197 return 0;189198}···205200206201 val = clamp(nr_profile, 0, max);207202203203+ mutex_lock(&tas_priv->codec_lock);204204+205205+ dev_dbg(tas_priv->dev, "%s: kcontrol %s: %d -> %d\n",206206+ __func__, kcontrol->id.name,207207+ tas_priv->rcabin.profile_cfg_id, val);208208+208209 if (tas_priv->rcabin.profile_cfg_id != val) {209210 tas_priv->rcabin.profile_cfg_id = val;210211 ret = 1;211212 }213213+214214+ mutex_unlock(&tas_priv->codec_lock);212215213216 return ret;214217}···254241{255242 struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);256243244244+ mutex_lock(&tas_priv->codec_lock);245245+257246 ucontrol->value.integer.value[0] = tas_priv->cur_prog;247247+248248+ dev_dbg(tas_priv->dev, "%s: kcontrol %s: %d\n",249249+ __func__, kcontrol->id.name, tas_priv->cur_prog);250250+251251+ mutex_unlock(&tas_priv->codec_lock);258252259253 return 0;260254}···277257278258 val = clamp(nr_program, 0, max);279259260260+ mutex_lock(&tas_priv->codec_lock);261261+262262+ dev_dbg(tas_priv->dev, "%s: kcontrol %s: %d -> %d\n",263263+ __func__, kcontrol->id.name, tas_priv->cur_prog, val);264264+280265 if (tas_priv->cur_prog != val) 
{281266 tas_priv->cur_prog = val;282267 ret = 1;283268 }269269+270270+ mutex_unlock(&tas_priv->codec_lock);284271285272 return ret;286273}···297270{298271 struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);299272273273+ mutex_lock(&tas_priv->codec_lock);274274+300275 ucontrol->value.integer.value[0] = tas_priv->cur_conf;276276+277277+ dev_dbg(tas_priv->dev, "%s: kcontrol %s: %d\n",278278+ __func__, kcontrol->id.name, tas_priv->cur_conf);279279+280280+ mutex_unlock(&tas_priv->codec_lock);301281302282 return 0;303283}···320286321287 val = clamp(nr_config, 0, max);322288289289+ mutex_lock(&tas_priv->codec_lock);290290+291291+ dev_dbg(tas_priv->dev, "%s: kcontrol %s: %d -> %d\n",292292+ __func__, kcontrol->id.name, tas_priv->cur_conf, val);293293+323294 if (tas_priv->cur_conf != val) {324295 tas_priv->cur_conf = val;325296 ret = 1;326297 }327298299299+ mutex_unlock(&tas_priv->codec_lock);300300+328301 return ret;329329-}330330-331331-/*332332- * tas2781_digital_getvol - get the volum control333333- * @kcontrol: control pointer334334- * @ucontrol: User data335335- * Customer Kcontrol for tas2781 is primarily for regmap booking, paging336336- * depends on internal regmap mechanism.337337- * tas2781 contains book and page two-level register map, especially338338- * book switching will set the register BXXP00R7F, after switching to the339339- * correct book, then leverage the mechanism for paging to access the340340- * register.341341- */342342-static int tas2781_digital_getvol(struct snd_kcontrol *kcontrol,343343- struct snd_ctl_elem_value *ucontrol)344344-{345345- struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);346346- struct soc_mixer_control *mc =347347- (struct soc_mixer_control *)kcontrol->private_value;348348-349349- return tasdevice_digital_getvol(tas_priv, ucontrol, mc);350302}351303352304static int tas2781_amp_getvol(struct snd_kcontrol *kcontrol,···341321 struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);342322 struct 
soc_mixer_control *mc =343323 (struct soc_mixer_control *)kcontrol->private_value;324324+ int ret;344325345345- return tasdevice_amp_getvol(tas_priv, ucontrol, mc);346346-}326326+ mutex_lock(&tas_priv->codec_lock);347327348348-static int tas2781_digital_putvol(struct snd_kcontrol *kcontrol,349349- struct snd_ctl_elem_value *ucontrol)350350-{351351- struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);352352- struct soc_mixer_control *mc =353353- (struct soc_mixer_control *)kcontrol->private_value;328328+ ret = tasdevice_amp_getvol(tas_priv, ucontrol, mc);354329355355- /* The check of the given value is in tasdevice_digital_putvol. */356356- return tasdevice_digital_putvol(tas_priv, ucontrol, mc);330330+ dev_dbg(tas_priv->dev, "%s: kcontrol %s: %ld\n",331331+ __func__, kcontrol->id.name, ucontrol->value.integer.value[0]);332332+333333+ mutex_unlock(&tas_priv->codec_lock);334334+335335+ return ret;357336}358337359338static int tas2781_amp_putvol(struct snd_kcontrol *kcontrol,···361342 struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);362343 struct soc_mixer_control *mc =363344 (struct soc_mixer_control *)kcontrol->private_value;345345+ int ret;346346+347347+ mutex_lock(&tas_priv->codec_lock);348348+349349+ dev_dbg(tas_priv->dev, "%s: kcontrol %s: -> %ld\n",350350+ __func__, kcontrol->id.name, ucontrol->value.integer.value[0]);364351365352 /* The check of the given value is in tasdevice_amp_putvol. 
*/366366- return tasdevice_amp_putvol(tas_priv, ucontrol, mc);353353+ ret = tasdevice_amp_putvol(tas_priv, ucontrol, mc);354354+355355+ mutex_unlock(&tas_priv->codec_lock);356356+357357+ return ret;367358}368359369360static int tas2781_force_fwload_get(struct snd_kcontrol *kcontrol,···381352{382353 struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);383354355355+ mutex_lock(&tas_priv->codec_lock);356356+384357 ucontrol->value.integer.value[0] = (int)tas_priv->force_fwload_status;385385- dev_dbg(tas_priv->dev, "%s : Force FWload %s\n", __func__,386386- tas_priv->force_fwload_status ? "ON" : "OFF");358358+ dev_dbg(tas_priv->dev, "%s: kcontrol %s: %d\n",359359+ __func__, kcontrol->id.name, tas_priv->force_fwload_status);360360+361361+ mutex_unlock(&tas_priv->codec_lock);387362388363 return 0;389364}···398365 struct tasdevice_priv *tas_priv = snd_kcontrol_chip(kcontrol);399366 bool change, val = (bool)ucontrol->value.integer.value[0];400367368368+ mutex_lock(&tas_priv->codec_lock);369369+370370+ dev_dbg(tas_priv->dev, "%s: kcontrol %s: %d -> %d\n",371371+ __func__, kcontrol->id.name,372372+ tas_priv->force_fwload_status, val);373373+401374 if (tas_priv->force_fwload_status == val)402375 change = false;403376 else {404377 change = true;405378 tas_priv->force_fwload_status = val;406379 }407407- dev_dbg(tas_priv->dev, "%s : Force FWload %s\n", __func__,408408- tas_priv->force_fwload_status ? "ON" : "OFF");380380+381381+ mutex_unlock(&tas_priv->codec_lock);409382410383 return change;411384}···420381 ACARD_SINGLE_RANGE_EXT_TLV("Speaker Analog Gain", TAS2781_AMP_LEVEL,421382 1, 0, 20, 0, tas2781_amp_getvol,422383 tas2781_amp_putvol, amp_vol_tlv),423423- ACARD_SINGLE_RANGE_EXT_TLV("Speaker Digital Gain", TAS2781_DVC_LVL,424424- 0, 0, 200, 1, tas2781_digital_getvol,425425- tas2781_digital_putvol, dvc_tlv),426384 ACARD_SINGLE_BOOL_EXT("Speaker Force Firmware Load", 0,427385 tas2781_force_fwload_get, tas2781_force_fwload_put),428386};
+14-3
sound/sh/aica.c
···278278 dreamcastcard->clicks++;279279 if (unlikely(dreamcastcard->clicks >= AICA_PERIOD_NUMBER))280280 dreamcastcard->clicks %= AICA_PERIOD_NUMBER;281281- mod_timer(&dreamcastcard->timer, jiffies + 1);281281+ if (snd_pcm_running(dreamcastcard->substream))282282+ mod_timer(&dreamcastcard->timer, jiffies + 1);282283 }283284}284285···291290 /*timer function - so cannot sleep */292291 int play_period;293292 struct snd_pcm_runtime *runtime;293293+ if (!snd_pcm_running(substream))294294+ return;294295 runtime = substream->runtime;295296 dreamcastcard = substream->pcm->private_data;296297 /* Have we played out an additional period? */···353350 return 0;354351}355352353353+static int snd_aicapcm_pcm_sync_stop(struct snd_pcm_substream *substream)354354+{355355+ struct snd_card_aica *dreamcastcard = substream->pcm->private_data;356356+357357+ del_timer_sync(&dreamcastcard->timer);358358+ cancel_work_sync(&dreamcastcard->spu_dma_work);359359+ return 0;360360+}361361+356362static int snd_aicapcm_pcm_close(struct snd_pcm_substream357363 *substream)358364{359365 struct snd_card_aica *dreamcastcard = substream->pcm->private_data;360360- flush_work(&(dreamcastcard->spu_dma_work));361361- del_timer(&dreamcastcard->timer);362366 dreamcastcard->substream = NULL;363367 kfree(dreamcastcard->channel);364368 spu_disable();···411401 .prepare = snd_aicapcm_pcm_prepare,412402 .trigger = snd_aicapcm_pcm_trigger,413403 .pointer = snd_aicapcm_pcm_pointer,404404+ .sync_stop = snd_aicapcm_pcm_sync_stop,414405};415406416407/* TO DO: set up to handle more than one pcm instance */
+16-3
sound/soc/sof/ipc4-topology.c
···13561356 int sample_rate, channel_count;13571357 int bit_depth, ret;13581358 u32 nhlt_type;13591359+ int dev_type = 0;1359136013601361 /* convert to NHLT type */13611362 switch (linktype) {···13721371 &bit_depth);13731372 if (ret < 0)13741373 return ret;13741374+13751375+ /*13761376+ * We need to know the type of the external device attached to a SSP13771377+ * port to retrieve the blob from NHLT. However, device type is not13781378+ * specified in topology.13791379+ * Query the type for the port and then pass that information back13801380+ * to the blob lookup function.13811381+ */13821382+ dev_type = intel_nhlt_ssp_device_type(sdev->dev, ipc4_data->nhlt,13831383+ dai_index);13841384+ if (dev_type < 0)13851385+ return dev_type;13751386 break;13761387 default:13771388 return 0;13781389 }1379139013801380- dev_dbg(sdev->dev, "dai index %d nhlt type %d direction %d\n",13811381- dai_index, nhlt_type, dir);13911391+ dev_dbg(sdev->dev, "dai index %d nhlt type %d direction %d dev type %d\n",13921392+ dai_index, nhlt_type, dir, dev_type);1382139313831394 /* find NHLT blob with matching params */13841395 cfg = intel_nhlt_get_endpoint_blob(sdev->dev, ipc4_data->nhlt, dai_index, nhlt_type,13851396 bit_depth, bit_depth, channel_count, sample_rate,13861386- dir, 0);13971397+ dir, dev_type);1387139813881399 if (!cfg) {13891400 dev_err(sdev->dev,
···121121 int i, n;122122123123 /* recognize hard coded LLVM section name */124124- if (strcmp(sec_name, ".arena.1") == 0) {124124+ if (strcmp(sec_name, ".addr_space.1") == 0) {125125 /* this is the name to use in skeleton */126126 snprintf(buf, buf_sz, "arena");127127 return true;
···228228 presence = ''229229 for i in range(0, len(ref)):230230 presence = f"{var}->{'.'.join(ref[:i] + [''])}_present.{ref[i]}"231231- if self.presence_type() == 'bit':232232- code.append(presence + ' = 1;')231231+ # Every layer below last is a nest, so we know it uses bit presence232232+ # last layer is "self" and may be a complex type233233+ if i == len(ref) - 1 and self.presence_type() != 'bit':234234+ continue235235+ code.append(presence + ' = 1;')233236 code += self._setter_lines(ri, member, presence)234237235238 func_name = f"{op_prefix(ri, direction, deref=deref)}_set_{'_'.join(ref)}"
+1-1
tools/objtool/check.c
···585585 struct section *rsec;586586 struct reloc *reloc;587587 struct instruction *insn;588588- unsigned long offset;588588+ uint64_t offset;589589590590 /*591591 * Check for manually annotated dead ends.
···33#include <test_progs.h>44#include <sys/mman.h>55#include <network_helpers.h>66-66+#include <sys/user.h>77+#ifndef PAGE_SIZE /* on some archs it comes in sys/user.h */88+#include <unistd.h>99+#define PAGE_SIZE getpagesize()1010+#endif711#include "arena_htab_asm.skel.h"812#include "arena_htab.skel.h"99-1010-#define PAGE_SIZE 409611131214#include "bpf_arena_htab.h"1315
···33#include <test_progs.h>44#include <sys/mman.h>55#include <network_helpers.h>66-77-#define PAGE_SIZE 409666+#include <sys/user.h>77+#ifndef PAGE_SIZE /* on some archs it comes in sys/user.h */88+#include <unistd.h>99+#define PAGE_SIZE getpagesize()1010+#endif811912#include "bpf_arena_list.h"1013#include "arena_list.skel.h"