···
 	signals when the PCI layer is able to support establishment of
 	link encryption and other device-security features coordinated
 	through a platform tsm.
-
-What:		/sys/class/tsm/tsmN/streamH.R.E
-Contact:	linux-pci@vger.kernel.org
-Description:
-	(RO) When a host bridge has established a secure connection via
-	the platform TSM, symlink appears. The primary function of this
-	is have a system global review of TSM resource consumption
-	across host bridges. The link points to the endpoint PCI device
-	and matches the same link published by the host bridge. See
-	Documentation/ABI/testing/sysfs-devices-pci-host-bridge.
+5
Documentation/admin-guide/kernel-parameters.txt
···
 			If there are multiple matching configurations changing
 			the same attribute, the last one is used.
 
+	liveupdate=	[KNL,EARLY]
+			Format: <bool>
+			Enable Live Update Orchestrator (LUO).
+			Default: off.
+
 	load_ramdisk=	[RAM] [Deprecated]
 
 	lockd.nlm_grace_period=P  [NFS] Assign grace period.
···
 extern void *__memset64(uint64_t *, uint32_t low, __kernel_size_t, uint32_t hi);
 static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n)
 {
-	return __memset64(p, v, n * 8, v >> 32);
+	if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
+		return __memset64(p, v, n * 8, v >> 32);
+	else
+		return __memset64(p, v >> 32, n * 8, v);
 }
 
 /*
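For context on the hunk above: __memset64() takes the 64-bit pattern split into two 32-bit register arguments, and which half must land at the lower address depends on byte order. A minimal user-space sketch of the idea (fake_memset64() is a hypothetical stand-in for the assembly helper, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical stand-in for the assembly helper: stores the pattern
	 * as two 32-bit words per element, "low" at the lower address. */
	static void fake_memset64(uint64_t *p, uint32_t low, size_t bytes, uint32_t hi)
	{
		uint32_t *q = (uint32_t *)p;

		for (size_t i = 0; i < bytes / 8; i++) {
			*q++ = low;
			*q++ = hi;
		}
	}

	int main(void)
	{
		uint64_t buf[1], v = 0x1122334455667788ULL;
		int little = *(uint8_t *)&v == 0x88;

		/* Little-endian: the low word sits at the lower address.
		 * Big-endian: the high word does, hence the swapped arguments. */
		if (little)
			fake_memset64(buf, v, sizeof(buf), v >> 32);
		else
			fake_memset64(buf, v >> 32, sizeof(buf), v);

		printf("pattern preserved: %s\n", buf[0] == v ? "yes" : "no");
		return 0;
	}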
+4-3
arch/x86/include/asm/kfence.h
···
 {
 	unsigned int level;
 	pte_t *pte = lookup_address(addr, &level);
-	pteval_t val;
+	pteval_t val, new;
 
 	if (WARN_ON(!pte || level != PG_LEVEL_4K))
 		return false;
···
 		return true;
 
 	/*
-	 * Otherwise, invert the entire PTE. This avoids writing out an
+	 * Otherwise, flip the Present bit, taking care to avoid writing an
 	 * L1TF-vulnerable PTE (not present, without the high address bits
 	 * set).
 	 */
-	set_pte(pte, __pte(~val));
+	new = val ^ _PAGE_PRESENT;
+	set_pte(pte, __pte(flip_protnone_guard(val, new, PTE_PFN_MASK)));
 
 	/*
 	 * If the page was protected (non-present) and we're making it
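For readers unfamiliar with the helper: flip_protnone_guard() lives in arch/x86/include/asm/pgtable-invert.h, and its effect, roughly, is that whenever the old and new PTE values disagree on whether the PFN must be inverted (a nonzero but not-present PTE must be), it XORs the PFN bits so a not-present PTE never carries a real physical address for L1TF to speculate on. A simplified sketch of that logic, not the verbatim kernel source:

	/* A nonzero but not-present PTE must have its PFN bits inverted. */
	static inline bool needs_invert(u64 val)
	{
		return val && !(val & _PAGE_PRESENT);
	}

	/* Simplified sketch of flip_protnone_guard(): if the old and new
	 * values disagree on inversion, XOR the bits selected by @mask
	 * (PTE_PFN_MASK at the call site above). */
	static inline u64 flip_protnone_guard_sketch(u64 oldval, u64 val, u64 mask)
	{
		if (needs_invert(oldval) != needs_invert(val))
			val = (val & ~mask) | (~val & mask);
		return val;
	}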
···
 	 */
 	spin_lock_irq(&kvm->irqfds.lock);
 
-	if (irqfd->irq_entry.type == KVM_IRQ_ROUTING_MSI) {
+	if (irqfd->irq_entry.type == KVM_IRQ_ROUTING_MSI ||
+	    WARN_ON_ONCE(irqfd->irq_bypass_vcpu)) {
 		ret = kvm_pi_update_irte(irqfd, NULL);
 		if (ret)
 			pr_info("irq bypass consumer (eventfd %p) unregistration fails: %d\n",
+2-2
arch/x86/kvm/svm/avic.c
···
 
 static int avic_init_backing_page(struct kvm_vcpu *vcpu)
 {
+	u32 max_id = x2avic_enabled ? x2avic_max_physical_id : AVIC_MAX_PHYSICAL_ID;
 	struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
 	struct vcpu_svm *svm = to_svm(vcpu);
 	u32 id = vcpu->vcpu_id;
···
 	 * avic_vcpu_load() expects to be called if and only if the vCPU has
 	 * fully initialized AVIC.
 	 */
-	if ((!x2avic_enabled && id > AVIC_MAX_PHYSICAL_ID) ||
-	    (id > x2avic_max_physical_id)) {
+	if (id > max_id) {
 		kvm_set_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_PHYSICAL_ID_TOO_BIG);
 		vcpu->arch.apic->apicv_active = false;
 		return 0;
+2
arch/x86/kvm/svm/svm.c
···
 	 */
 	kvm_cpu_cap_clear(X86_FEATURE_BUS_LOCK_DETECT);
 	kvm_cpu_cap_clear(X86_FEATURE_MSR_IMM);
+
+	kvm_setup_xss_caps();
 }
 
 static __init int svm_hardware_setup(void)
···
 };
 #endif
 
+void kvm_setup_xss_caps(void)
+{
+	if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES))
+		kvm_caps.supported_xss = 0;
+
+	if (!kvm_cpu_cap_has(X86_FEATURE_SHSTK) &&
+	    !kvm_cpu_cap_has(X86_FEATURE_IBT))
+		kvm_caps.supported_xss &= ~XFEATURE_MASK_CET_ALL;
+
+	if ((kvm_caps.supported_xss & XFEATURE_MASK_CET_ALL) != XFEATURE_MASK_CET_ALL) {
+		kvm_cpu_cap_clear(X86_FEATURE_SHSTK);
+		kvm_cpu_cap_clear(X86_FEATURE_IBT);
+		kvm_caps.supported_xss &= ~XFEATURE_MASK_CET_ALL;
+	}
+}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_setup_xss_caps);
+
 static inline void kvm_ops_update(struct kvm_x86_init_ops *ops)
 {
 	memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops));
···
 	/* KVM always ignores guest PAT for shadow paging. */
 	if (!tdp_enabled)
 		kvm_caps.supported_quirks &= ~KVM_X86_QUIRK_IGNORE_GUEST_PAT;
-
-	if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES))
-		kvm_caps.supported_xss = 0;
-
-	if (!kvm_cpu_cap_has(X86_FEATURE_SHSTK) &&
-	    !kvm_cpu_cap_has(X86_FEATURE_IBT))
-		kvm_caps.supported_xss &= ~XFEATURE_MASK_CET_ALL;
-
-	if ((kvm_caps.supported_xss & XFEATURE_MASK_CET_ALL) != XFEATURE_MASK_CET_ALL) {
-		kvm_cpu_cap_clear(X86_FEATURE_SHSTK);
-		kvm_cpu_cap_clear(X86_FEATURE_IBT);
-		kvm_caps.supported_xss &= ~XFEATURE_MASK_CET_ALL;
-	}
 
 	if (kvm_caps.has_tsc_control) {
 		/*
+2
arch/x86/kvm/x86.h
···
 
 extern bool enable_pmu;
 
+void kvm_setup_xss_caps(void);
+
 /*
  * Get a filtered version of KVM's supported XCR0 that strips out dynamic
  * features for which the current process doesn't (yet) have permission to use.
+16-3
drivers/android/binder.c
···
  * @t:         the binder transaction that failed
  * @data_size: the user provided data size for the transaction
  * @error:     enum binder_driver_return_protocol returned to sender
+ *
+ * Note that t->buffer is not safe to access here, as it may have been
+ * released (or not yet allocated). Callers should guarantee all the
+ * transaction items used here are safe to access.
  */
 static void binder_netlink_report(struct binder_proc *proc,
 				  struct binder_transaction *t,
···
 			goto err_dead_proc_or_thread;
 		}
 	} else {
+		/*
+		 * Make a transaction copy. It is not safe to access 't' after
+		 * binder_proc_transaction() reported a pending frozen. The
+		 * target could thaw and consume the transaction at any point.
+		 * Instead, use a safe 't_copy' for binder_netlink_report().
+		 */
+		struct binder_transaction t_copy = *t;
+
 		BUG_ON(target_node == NULL);
 		BUG_ON(t->buffer->async_transaction != 1);
 		return_error = binder_proc_transaction(t, target_proc, NULL);
···
 		 */
 		if (return_error == BR_TRANSACTION_PENDING_FROZEN) {
 			tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
-			binder_netlink_report(proc, t, tr->data_size,
+			binder_netlink_report(proc, &t_copy, tr->data_size,
 					      return_error);
 		}
 		binder_enqueue_thread_work(thread, tcomplete);
···
 	return;
 
 err_dead_proc_or_thread:
-	binder_txn_error("%d:%d dead process or thread\n",
-		thread->pid, proc->pid);
+	binder_txn_error("%d:%d %s process or thread\n",
+			 proc->pid, thread->pid,
+			 return_error == BR_FROZEN_REPLY ? "frozen" : "dead");
 	return_error_line = __LINE__;
 	binder_dequeue_work(proc, tcomplete);
 err_translate_failed:
+6-11
drivers/android/binder/rust_binderfs.c
···
 	mutex_lock(&binderfs_minors_mutex);
 	if (++info->device_count <= info->mount_opts.max)
 		minor = ida_alloc_max(&binderfs_minors,
-				      use_reserve ? BINDERFS_MAX_MINOR :
-						    BINDERFS_MAX_MINOR_CAPPED,
+				      use_reserve ? BINDERFS_MAX_MINOR - 1 :
+						    BINDERFS_MAX_MINOR_CAPPED - 1,
 				      GFP_KERNEL);
 	else
 		minor = -ENOSPC;
···
 	if (!device)
 		return -ENOMEM;
 
-	/* If we have already created a binder-control node, return. */
-	if (info->control_dentry) {
-		ret = 0;
-		goto out;
-	}
-
 	ret = -ENOMEM;
 	inode = new_inode(sb);
 	if (!inode)
···
 	/* Reserve a new minor number for the new device. */
 	mutex_lock(&binderfs_minors_mutex);
 	minor = ida_alloc_max(&binderfs_minors,
-			      use_reserve ? BINDERFS_MAX_MINOR :
-					    BINDERFS_MAX_MINOR_CAPPED,
+			      use_reserve ? BINDERFS_MAX_MINOR - 1 :
+					    BINDERFS_MAX_MINOR_CAPPED - 1,
 			      GFP_KERNEL);
 	mutex_unlock(&binderfs_minors_mutex);
 	if (minor < 0) {
···
 
 	inode->i_private = device;
 	info->control_dentry = dentry;
-	d_add(dentry, inode);
+	d_make_persistent(dentry, inode);
+	dput(dentry);
 
 	return 0;
 
+70-39
drivers/android/binder/thread.rs
···
     sync::atomic::{AtomicU32, Ordering},
 };
 
+fn is_aligned(value: usize, to: usize) -> bool {
+    value % to == 0
+}
+
 /// Stores the layout of the scatter-gather entries. This is used during the `translate_objects`
 /// call and is discarded when it returns.
 struct ScatterGatherState {
···
 }
 
 /// This entry specifies that a fixup should happen at `target_offset` of the
-/// buffer. If `skip` is nonzero, then the fixup is a `binder_fd_array_object`
-/// and is applied later. Otherwise if `skip` is zero, then the size of the
-/// fixup is `sizeof::<u64>()` and `pointer_value` is written to the buffer.
-struct PointerFixupEntry {
-    /// The number of bytes to skip, or zero for a `binder_buffer_object` fixup.
-    skip: usize,
-    /// The translated pointer to write when `skip` is zero.
-    pointer_value: u64,
-    /// The offset at which the value should be written. The offset is relative
-    /// to the original buffer.
-    target_offset: usize,
+/// buffer.
+enum PointerFixupEntry {
+    /// A fixup for a `binder_buffer_object`.
+    Fixup {
+        /// The translated pointer to write.
+        pointer_value: u64,
+        /// The offset at which the value should be written. The offset is relative
+        /// to the original buffer.
+        target_offset: usize,
+    },
+    /// A skip for a `binder_fd_array_object`.
+    Skip {
+        /// The number of bytes to skip.
+        skip: usize,
+        /// The offset at which the skip should happen. The offset is relative
+        /// to the original buffer.
+        target_offset: usize,
+    },
 }
 
 /// Return type of `apply_and_validate_fixup_in_parent`.
···
 
     parent_entry.fixup_min_offset = info.new_min_offset;
     parent_entry.pointer_fixups.push(
-        PointerFixupEntry {
-            skip: 0,
+        PointerFixupEntry::Fixup {
             pointer_value: buffer_ptr_in_user_space,
             target_offset: info.target_offset,
         },
···
     let num_fds = usize::try_from(obj.num_fds).map_err(|_| EINVAL)?;
     let fds_len = num_fds.checked_mul(size_of::<u32>()).ok_or(EINVAL)?;
 
+    if !is_aligned(parent_offset, size_of::<u32>()) {
+        return Err(EINVAL.into());
+    }
+
     let info = sg_state.validate_parent_fixup(parent_index, parent_offset, fds_len)?;
     view.alloc.info_add_fd_reserve(num_fds)?;
 
···
         }
     };
 
+    if !is_aligned(parent_entry.sender_uaddr, size_of::<u32>()) {
+        return Err(EINVAL.into());
+    }
+
     parent_entry.fixup_min_offset = info.new_min_offset;
     parent_entry
         .pointer_fixups
         .push(
-            PointerFixupEntry {
+            PointerFixupEntry::Skip {
                 skip: fds_len,
-                pointer_value: 0,
                 target_offset: info.target_offset,
             },
             GFP_KERNEL,
···
         .sender_uaddr
         .checked_add(parent_offset)
         .ok_or(EINVAL)?;
+
     let mut fda_bytes = KVec::new();
     UserSlice::new(UserPtr::from_addr(fda_uaddr as _), fds_len)
         .read_all(&mut fda_bytes, GFP_KERNEL)?;
···
     let mut reader =
         UserSlice::new(UserPtr::from_addr(sg_entry.sender_uaddr), sg_entry.length).reader();
     for fixup in &mut sg_entry.pointer_fixups {
-        let fixup_len = if fixup.skip == 0 {
-            size_of::<u64>()
-        } else {
-            fixup.skip
+        let (fixup_len, fixup_offset) = match fixup {
+            PointerFixupEntry::Fixup { target_offset, .. } => {
+                (size_of::<u64>(), *target_offset)
+            }
+            PointerFixupEntry::Skip {
+                skip,
+                target_offset,
+            } => (*skip, *target_offset),
         };
 
-        let target_offset_end = fixup.target_offset.checked_add(fixup_len).ok_or(EINVAL)?;
-        if fixup.target_offset < end_of_previous_fixup || offset_end < target_offset_end {
+        let target_offset_end = fixup_offset.checked_add(fixup_len).ok_or(EINVAL)?;
+        if fixup_offset < end_of_previous_fixup || offset_end < target_offset_end {
             pr_warn!(
                 "Fixups oob {} {} {} {}",
-                fixup.target_offset,
+                fixup_offset,
                 end_of_previous_fixup,
                 offset_end,
                 target_offset_end
···
         }
 
         let copy_off = end_of_previous_fixup;
-        let copy_len = fixup.target_offset - end_of_previous_fixup;
+        let copy_len = fixup_offset - end_of_previous_fixup;
         if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
             pr_warn!("Failed copying into alloc: {:?}", err);
             return Err(err.into());
         }
-        if fixup.skip == 0 {
-            let res = alloc.write::<u64>(fixup.target_offset, &fixup.pointer_value);
+        if let PointerFixupEntry::Fixup { pointer_value, .. } = fixup {
+            let res = alloc.write::<u64>(fixup_offset, pointer_value);
             if let Err(err) = res {
                 pr_warn!("Failed copying ptr into alloc: {:?}", err);
                 return Err(err.into());
···
 
     let data_size = trd.data_size.try_into().map_err(|_| EINVAL)?;
     let aligned_data_size = ptr_align(data_size).ok_or(EINVAL)?;
-    let offsets_size = trd.offsets_size.try_into().map_err(|_| EINVAL)?;
-    let aligned_offsets_size = ptr_align(offsets_size).ok_or(EINVAL)?;
-    let buffers_size = tr.buffers_size.try_into().map_err(|_| EINVAL)?;
-    let aligned_buffers_size = ptr_align(buffers_size).ok_or(EINVAL)?;
+    let offsets_size: usize = trd.offsets_size.try_into().map_err(|_| EINVAL)?;
+    let buffers_size: usize = tr.buffers_size.try_into().map_err(|_| EINVAL)?;
     let aligned_secctx_size = match secctx.as_ref() {
         Some((_offset, ctx)) => ptr_align(ctx.len()).ok_or(EINVAL)?,
         None => 0,
     };
 
+    if !is_aligned(offsets_size, size_of::<u64>()) {
+        return Err(EINVAL.into());
+    }
+    if !is_aligned(buffers_size, size_of::<u64>()) {
+        return Err(EINVAL.into());
+    }
+
     // This guarantees that at least `sizeof(usize)` bytes will be allocated.
     let len = usize::max(
         aligned_data_size
-            .checked_add(aligned_offsets_size)
-            .and_then(|sum| sum.checked_add(aligned_buffers_size))
+            .checked_add(offsets_size)
+            .and_then(|sum| sum.checked_add(buffers_size))
             .and_then(|sum| sum.checked_add(aligned_secctx_size))
             .ok_or(ENOMEM)?,
-        size_of::<usize>(),
+        size_of::<u64>(),
     );
-    let secctx_off = aligned_data_size + aligned_offsets_size + aligned_buffers_size;
+    let secctx_off = aligned_data_size + offsets_size + buffers_size;
     let mut alloc =
         match to_process.buffer_alloc(debug_id, len, is_oneway, self.process.task.pid()) {
             Ok(alloc) => alloc,
···
     }
 
     let offsets_start = aligned_data_size;
-    let offsets_end = aligned_data_size + aligned_offsets_size;
+    let offsets_end = aligned_data_size + offsets_size;
 
     // This state is used for BINDER_TYPE_PTR objects.
     let sg_state = sg_state.insert(ScatterGatherState {
         unused_buffer_space: UnusedBufferSpace {
             offset: offsets_end,
-            limit: len,
+            limit: offsets_end + buffers_size,
         },
         sg_entries: KVec::new(),
         ancestors: KVec::new(),
···
     // Traverse the objects specified.
     let mut view = AllocationView::new(&mut alloc, data_size);
     for (index, index_offset) in (offsets_start..offsets_end)
-        .step_by(size_of::<usize>())
+        .step_by(size_of::<u64>())
         .enumerate()
     {
-        let offset = view.alloc.read(index_offset)?;
+        let offset: usize = view
+            .alloc
+            .read::<u64>(index_offset)?
+            .try_into()
+            .map_err(|_| EINVAL)?;
 
-        if offset < end_of_previous_object {
+        if offset < end_of_previous_object || !is_aligned(offset, size_of::<u32>()) {
             pr_warn!("Got transaction with invalid offset.");
             return Err(EINVAL.into());
         }
···
     }
 
     // Update the indexes containing objects to clean up.
-    let offset_after_object = index_offset + size_of::<usize>();
+    let offset_after_object = index_offset + size_of::<u64>();
     view.alloc
         .set_info_offsets(offsets_start..offset_after_object);
 }
+4-4
drivers/android/binderfs.c
···
 	mutex_lock(&binderfs_minors_mutex);
 	if (++info->device_count <= info->mount_opts.max)
 		minor = ida_alloc_max(&binderfs_minors,
-				      use_reserve ? BINDERFS_MAX_MINOR :
-						    BINDERFS_MAX_MINOR_CAPPED,
+				      use_reserve ? BINDERFS_MAX_MINOR - 1 :
+						    BINDERFS_MAX_MINOR_CAPPED - 1,
 				      GFP_KERNEL);
 	else
 		minor = -ENOSPC;
···
 	/* Reserve a new minor number for the new device. */
 	mutex_lock(&binderfs_minors_mutex);
 	minor = ida_alloc_max(&binderfs_minors,
-			      use_reserve ? BINDERFS_MAX_MINOR :
-					    BINDERFS_MAX_MINOR_CAPPED,
+			      use_reserve ? BINDERFS_MAX_MINOR - 1 :
+					    BINDERFS_MAX_MINOR_CAPPED - 1,
 			      GFP_KERNEL);
 	mutex_unlock(&binderfs_minors_mutex);
 	if (minor < 0) {
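Context for both binderfs hunks: ida_alloc_max() treats its @max argument as an inclusive upper bound, so allocating a minor strictly below BINDERFS_MAX_MINOR requires passing BINDERFS_MAX_MINOR - 1. A minimal sketch of the pattern (example_ida and nr_minors are hypothetical names):

	#include <linux/idr.h>

	static DEFINE_IDA(example_ida);

	/* ida_alloc_max() returns an ID in [0, max] *inclusive*; to keep IDs
	 * strictly below nr_minors, pass nr_minors - 1 as the bound. */
	static int example_alloc_minor(unsigned int nr_minors)
	{
		return ida_alloc_max(&example_ida, nr_minors - 1, GFP_KERNEL);
	}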
···
 	chip->irq.num_parents = data->intr_num;
 	chip->irq.parents = devm_kcalloc(&pdev->dev, data->intr_num,
 					 sizeof(*chip->irq.parents), GFP_KERNEL);
-	if (!chip->parent)
+	if (!chip->irq.parents)
 		return -ENOMEM;
 
 	for (i = 0; i < data->intr_num; i++) {
+1
drivers/gpio/gpiolib-acpi-core.c
···
 	while (element < end) {
 		switch (element->type) {
 		case ACPI_TYPE_LOCAL_REFERENCE:
+		case ACPI_TYPE_STRING:
 			element += 3;
 			fallthrough;
 		case ACPI_TYPE_INTEGER:
+9-9
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
···
 
 	/* Make sure restore workers don't access the BO any more */
 	mutex_lock(&process_info->lock);
-	list_del(&mem->validate_list);
+	if (!list_empty(&mem->validate_list))
+		list_del_init(&mem->validate_list);
 	mutex_unlock(&process_info->lock);
-
-	/* Cleanup user pages and MMU notifiers */
-	if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
-		amdgpu_hmm_unregister(mem->bo);
-		mutex_lock(&process_info->notifier_lock);
-		amdgpu_hmm_range_free(mem->range);
-		mutex_unlock(&process_info->notifier_lock);
-	}
 
 	ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
 	if (unlikely(ret))
 		return ret;
+
+	/* Cleanup user pages and MMU notifiers */
+	if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
+		amdgpu_hmm_unregister(mem->bo);
+		amdgpu_hmm_range_free(mem->range);
+		mem->range = NULL;
+	}
 
 	amdgpu_amdkfd_remove_eviction_fence(mem->bo,
 					    process_info->eviction_fence);
···
 #include <linux/module.h>
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/regmap.h>
 #include <drm/bridge/dw_hdmi.h>
 #include <sound/asoundef.h>
···
 
 struct imx8mp_hdmi_pai {
 	struct regmap *regmap;
+	struct device *dev;
 };
 
 static void imx8mp_hdmi_pai_enable(struct dw_hdmi *dw_hdmi, int channel,
···
 	const struct dw_hdmi_plat_data *pdata = dw_hdmi_to_plat_data(dw_hdmi);
 	struct imx8mp_hdmi_pai *hdmi_pai = pdata->priv_audio;
 	int val;
+
+	if (pm_runtime_resume_and_get(hdmi_pai->dev) < 0)
+		return;
 
 	/* PAI set control extended */
 	val = WTMK_HIGH(3) | WTMK_LOW(3);
···
 
 	/* Stop PAI */
 	regmap_write(hdmi_pai->regmap, HTX_PAI_CTRL, 0);
+
+	pm_runtime_put_sync(hdmi_pai->dev);
 }
 
 static const struct regmap_config imx8mp_hdmi_pai_regmap_config = {
···
 	struct imx8mp_hdmi_pai *hdmi_pai;
 	struct resource *res;
 	void __iomem *base;
+	int ret;
 
 	hdmi_pai = devm_kzalloc(dev, sizeof(*hdmi_pai), GFP_KERNEL);
 	if (!hdmi_pai)
···
 	plat_data->enable_audio = imx8mp_hdmi_pai_enable;
 	plat_data->disable_audio = imx8mp_hdmi_pai_disable;
 	plat_data->priv_audio = hdmi_pai;
+
+	hdmi_pai->dev = dev;
+	ret = devm_pm_runtime_enable(dev);
+	if (ret < 0) {
+		dev_err(dev, "failed to enable PM runtime: %d\n", ret);
+		return ret;
+	}
 
 	return 0;
 }
+13-23
drivers/gpu/drm/gma500/psb_irq.c
···
 void gma_irq_preinstall(struct drm_device *dev)
 {
 	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
-	struct drm_crtc *crtc;
 	unsigned long irqflags;
 
 	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
···
 	PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
 	PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
 
-	drm_for_each_crtc(crtc, dev) {
-		struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
-
-		if (vblank->enabled) {
-			u32 mask = drm_crtc_index(crtc) ? _PSB_VSYNC_PIPEB_FLAG :
-							  _PSB_VSYNC_PIPEA_FLAG;
-			dev_priv->vdc_irq_mask |= mask;
-		}
-	}
+	if (dev->vblank[0].enabled)
+		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
+	if (dev->vblank[1].enabled)
+		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
 
 	/* Revisit this area - want per device masks ? */
 	if (dev_priv->ops->hotplug)
···
 void gma_irq_postinstall(struct drm_device *dev)
 {
 	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
-	struct drm_crtc *crtc;
 	unsigned long irqflags;
+	unsigned int i;
 
 	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
 
···
 	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
 	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
 
-	drm_for_each_crtc(crtc, dev) {
-		struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
-
-		if (vblank->enabled)
-			gma_enable_pipestat(dev_priv, drm_crtc_index(crtc), PIPE_VBLANK_INTERRUPT_ENABLE);
+	for (i = 0; i < dev->num_crtcs; ++i) {
+		if (dev->vblank[i].enabled)
+			gma_enable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
 		else
-			gma_disable_pipestat(dev_priv, drm_crtc_index(crtc), PIPE_VBLANK_INTERRUPT_ENABLE);
+			gma_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
 	}
 
 	if (dev_priv->ops->hotplug_enable)
···
 {
 	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
-	struct drm_crtc *crtc;
 	unsigned long irqflags;
+	unsigned int i;
 
 	if (!dev_priv->irq_enabled)
 		return;
···
 
 	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
 
-	drm_for_each_crtc(crtc, dev) {
-		struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
-
-		if (vblank->enabled)
-			gma_disable_pipestat(dev_priv, drm_crtc_index(crtc), PIPE_VBLANK_INTERRUPT_ENABLE);
+	for (i = 0; i < dev->num_crtcs; ++i) {
+		if (dev->vblank[i].enabled)
+			gma_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
 	}
 
 	dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
+12-19
drivers/gpu/drm/mgag200/mgag200_bmc.c
···
 // SPDX-License-Identifier: GPL-2.0-only
 
 #include <linux/delay.h>
+#include <linux/iopoll.h>
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_edid.h>
···
 void mgag200_bmc_stop_scanout(struct mga_device *mdev)
 {
 	u8 tmp;
-	int iter_max;
+	int ret;
 
 	/*
 	 * 1 - The first step is to inform the BMC of an upcoming mode
···
 
 	/*
 	 * 3a- The third step is to verify if there is an active scan.
-	 * We are waiting for a 0 on remhsyncsts <XSPAREREG<0>).
+	 * We are waiting for a 0 on remhsyncsts (<XSPAREREG<0>).
 	 */
-	iter_max = 300;
-	while (!(tmp & 0x1) && iter_max) {
-		WREG8(DAC_INDEX, MGA1064_SPAREREG);
-		tmp = RREG8(DAC_DATA);
-		udelay(1000);
-		iter_max--;
-	}
+	ret = read_poll_timeout(RREG_DAC, tmp, !(tmp & 0x1),
+				1000, 300000, false,
+				MGA1064_SPAREREG);
+	if (ret == -ETIMEDOUT)
+		return;
 
 	/*
-	 * 3b- This step occurs only if the remove is actually
+	 * 3b- This step occurs only if the remote BMC is actually
 	 * scanning. We are waiting for the end of the frame which is
 	 * a 1 on remvsyncsts (XSPAREREG<1>)
 	 */
-	if (iter_max) {
-		iter_max = 300;
-		while ((tmp & 0x2) && iter_max) {
-			WREG8(DAC_INDEX, MGA1064_SPAREREG);
-			tmp = RREG8(DAC_DATA);
-			udelay(1000);
-			iter_max--;
-		}
-	}
+	(void)read_poll_timeout(RREG_DAC, tmp, (tmp & 0x2),
+				1000, 300000, false,
+				MGA1064_SPAREREG);
 }
 
 void mgag200_bmc_start_scanout(struct mga_device *mdev)
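read_poll_timeout() from <linux/iopoll.h>, used above, replaces the open-coded delay loops: it evaluates val = op(args...) until cond becomes true or timeout_us elapses, sleeping sleep_us between reads, and returns 0 or -ETIMEDOUT. A hedged sketch of the general shape (the accessor and register offset below are hypothetical, not from this driver):

	#include <linux/io.h>
	#include <linux/iopoll.h>

	/* Hypothetical 8-bit status read accessor. */
	static u8 example_read_status(void __iomem *base)
	{
		return readb(base + 0x04);
	}

	static int example_wait_idle(void __iomem *base)
	{
		u8 sts;

		/* Poll example_read_status(base) into sts every 1000 us until
		 * bit 0 clears; give up after 300 ms. Returns 0 or -ETIMEDOUT. */
		return read_poll_timeout(example_read_status, sts, !(sts & 0x1),
					 1000, 300 * USEC_PER_MSEC, false, base);
	}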
···
  * NVKM_GSP_RPC_REPLY_NOWAIT - If specified, immediately return to the
  * caller after the GSP RPC command is issued.
  *
+ * NVKM_GSP_RPC_REPLY_NOSEQ - If specified, exactly like NOWAIT
+ * but don't emit RPC sequence number.
+ *
  * NVKM_GSP_RPC_REPLY_RECV - If specified, wait and receive the entire GSP
  * RPC message after the GSP RPC command is issued.
  *
···
  */
 enum nvkm_gsp_rpc_reply_policy {
 	NVKM_GSP_RPC_REPLY_NOWAIT = 0,
+	NVKM_GSP_RPC_REPLY_NOSEQ,
 	NVKM_GSP_RPC_REPLY_RECV,
 	NVKM_GSP_RPC_REPLY_POLL,
 };
···
 
 	/* The size of the registry RPC */
 	size_t registry_rpc_size;
+
+	u32 rpc_seq;
 
 #ifdef CONFIG_DEBUG_FS
 	/*
···
 }
 
 /**
- * xe_get_migrate_exec_queue() - Get the execution queue from migrate context.
+ * xe_migrate_exec_queue() - Get the execution queue from migrate context.
  * @migrate: Migrate context.
  *
  * Return: Pointer to execution queue on success, error on failure
+10-3
drivers/gpu/drm/xe/xe_pm.c
···
 #include <linux/fault-inject.h>
 #include <linux/pm_runtime.h>
 #include <linux/suspend.h>
+#include <linux/dmi.h>
 
 #include <drm/drm_managed.h>
 #include <drm/ttm/ttm_placement.h>
···
 
 static u32 vram_threshold_value(struct xe_device *xe)
 {
-	/* FIXME: D3Cold temporarily disabled by default on BMG */
-	if (xe->info.platform == XE_BATTLEMAGE)
-		return 0;
+	if (xe->info.platform == XE_BATTLEMAGE) {
+		const char *product_name;
+
+		product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
+		if (product_name && strstr(product_name, "NUC13RNG")) {
+			drm_warn(&xe->drm, "BMG + D3Cold not supported on this platform\n");
+			return 0;
+		}
+	}
 
 	return DEFAULT_VRAM_THRESHOLD;
 }
+1-1
drivers/gpu/drm/xe/xe_query.c
···
 
 	if (copy_to_user(*ptr, topo, sizeof(*topo)))
 		return -EFAULT;
-	*ptr += sizeof(topo);
+	*ptr += sizeof(*topo);
 
 	if (copy_to_user(*ptr, mask, mask_size))
 		return -EFAULT;
···
 		int ret;
 
 		ret = pm_runtime_put_sync(fan_data->dev);
-		if (ret < 0)
+		if (ret < 0 && ret != -ENOSYS)
 			return ret;
 	}
 
···
 {
 	struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
 	unsigned long rpm;
-	int ret = count;
+	int ret;
 
 	if (kstrtoul(buf, 10, &rpm))
 		return -EINVAL;
···
 exit_unlock:
 	mutex_unlock(&fan_data->lock);
 
-	return ret;
+	return ret ? ret : count;
 }
 
 static DEVICE_ATTR_RW(pwm1);
+1
drivers/hwmon/occ/common.c
···
  * are dynamically allocated, we cannot use the existing kernel macros which
  * stringify the name argument.
  */
+__printf(7, 8)
 static void occ_init_attribute(struct occ_attribute *attr, int mode,
 			       ssize_t (*show)(struct device *dev, struct device_attribute *attr, char *buf),
 			       ssize_t (*store)(struct device *dev, struct device_attribute *attr,
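__printf(7, 8) tells the compiler that the function's seventh argument is a printf-style format string consumed by varargs starting at the eighth, so format/argument mismatches at call sites become compile-time warnings. A minimal sketch of the attribute on a smaller, hypothetical function:

	#include <linux/printk.h>
	#include <linux/stdarg.h>

	/* Argument 2 is the format string; varargs begin at argument 3. */
	__printf(2, 3)
	static void example_log(int level, const char *fmt, ...)
	{
		va_list args;

		va_start(args, fmt);
		vprintk(fmt, args);	/* `level` handling omitted for brevity */
		va_end(args);
	}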
+2-1
drivers/i2c/busses/i2c-imx.c
···
 
 	case IMX_I2C_STATE_READ_BLOCK_DATA_LEN:
 		i2c_imx_isr_read_block_data_len(i2c_imx);
-		i2c_imx->state = IMX_I2C_STATE_READ_CONTINUE;
+		if (i2c_imx->state == IMX_I2C_STATE_READ_BLOCK_DATA_LEN)
+			i2c_imx->state = IMX_I2C_STATE_READ_CONTINUE;
 		break;
 
 	case IMX_I2C_STATE_WRITE:
+1-1
drivers/iommu/intel/pasid.h
···
 
 #define PASID_FLAG_NESTED		BIT(1)
 #define PASID_FLAG_PAGE_SNOOP		BIT(2)
-#define PASID_FLAG_PWSNP		BIT(2)
+#define PASID_FLAG_PWSNP		BIT(3)
 
 /*
  * The PASID_FLAG_FL5LP flag Indicates using 5-level paging for first-
+3
drivers/net/ethernet/adi/adin1110.c
···
 
 	reset_gpio = devm_gpiod_get_optional(&priv->spidev->dev, "reset",
 					     GPIOD_OUT_LOW);
+	if (IS_ERR(reset_gpio))
+		return dev_err_probe(&priv->spidev->dev, PTR_ERR(reset_gpio),
+				     "failed to get reset gpio\n");
 	if (reset_gpio) {
 		/* MISO pin is used for internal configuration, can't have
 		 * anyone else disturbing the SDO line.
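devm_gpiod_get_optional() has three distinct outcomes, and the hunk above adds the previously missing error leg: an ERR_PTR() on lookup failure (including -EPROBE_DEFER), NULL when the "reset" property is simply absent, or a valid descriptor. A sketch of the full pattern (example_get_reset() is a hypothetical probe fragment):

	#include <linux/gpio/consumer.h>

	static int example_get_reset(struct device *dev)
	{
		struct gpio_desc *reset;

		reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
		if (IS_ERR(reset))	/* lookup failed (or -EPROBE_DEFER) */
			return dev_err_probe(dev, PTR_ERR(reset),
					     "failed to get reset gpio\n");
		if (!reset)		/* property absent: optional, carry on */
			return 0;

		gpiod_set_value_cansleep(reset, 1);	/* assert reset */
		return 0;
	}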
+20-19
drivers/net/ethernet/cavium/liquidio/lio_main.c
···
 	 */
 	netdev->netdev_ops = &lionetdevops;
 
+	lio = GET_LIO(netdev);
+
+	memset(lio, 0, sizeof(struct lio));
+
+	lio->ifidx = ifidx_or_pfnum;
+
+	props = &octeon_dev->props[i];
+	props->gmxport = resp->cfg_info.linfo.gmxport;
+	props->netdev = netdev;
+
+	/* Point to the properties for octeon device to which this
+	 * interface belongs.
+	 */
+	lio->oct_dev = octeon_dev;
+	lio->octprops = props;
+	lio->netdev = netdev;
+
 	retval = netif_set_real_num_rx_queues(netdev, num_oqueues);
 	if (retval) {
 		dev_err(&octeon_dev->pci_dev->dev,
···
 		WRITE_ONCE(sc->caller_is_done, true);
 		goto setup_nic_dev_free;
 	}
-
-	lio = GET_LIO(netdev);
-
-	memset(lio, 0, sizeof(struct lio));
-
-	lio->ifidx = ifidx_or_pfnum;
-
-	props = &octeon_dev->props[i];
-	props->gmxport = resp->cfg_info.linfo.gmxport;
-	props->netdev = netdev;
 
 	lio->linfo.num_rxpciq = num_oqueues;
 	lio->linfo.num_txpciq = num_iqueues;
···
 	/* MTU range: 68 - 16000 */
 	netdev->min_mtu = LIO_MIN_MTU_SIZE;
 	netdev->max_mtu = LIO_MAX_MTU_SIZE;
-
-	/* Point to the properties for octeon device to which this
-	 * interface belongs.
-	 */
-	lio->oct_dev = octeon_dev;
-	lio->octprops = props;
-	lio->netdev = netdev;
 
 	dev_dbg(&octeon_dev->pci_dev->dev,
 		"if%d gmx: %d hw_addr: 0x%llx\n", i,
···
 	if (!devlink) {
 		device_unlock(&octeon_dev->pci_dev->dev);
 		dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
+		i--;
 		goto setup_nic_dev_free;
 	}
 
···
 
 setup_nic_dev_free:
 
-	while (i--) {
+	do {
 		dev_err(&octeon_dev->pci_dev->dev,
 			"NIC ifidx:%d Setup failed\n", i);
 		liquidio_destroy_nic_device(octeon_dev, i);
-	}
+	} while (i--);
 
 setup_nic_dev_done:
 
···
 	if (ice_is_reset_in_progress(pf->state))
 		goto skip_irq;
 
-	if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) {
-		/* Process outstanding Tx timestamps. If there is more work,
-		 * re-arm the interrupt to trigger again.
-		 */
-		if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
-			wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
-			ice_flush(hw);
-		}
-	}
+	if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread))
+		ice_ptp_process_ts(pf);
 
 skip_irq:
 	ice_irq_dynamic_ena(hw, NULL, NULL);
+	ice_flush(hw);
+
+	if (ice_ptp_tx_tstamps_pending(pf)) {
+		/* If any new Tx timestamps happened while in interrupt,
+		 * re-arm the interrupt to trigger it again.
+		 */
+		wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
+		ice_flush(hw);
+	}
 
 	return IRQ_HANDLED;
 }
···
 
 	/* Restore timestamp mode settings after VSI rebuild */
 	ice_ptp_restore_timestamp_mode(pf);
+
+	/* Start PTP periodic work after VSI is fully rebuilt */
+	ice_ptp_queue_work(pf);
 	return;
 
 err_vsi_rebuild:
···
 	if (err)
 		netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n",
 			   vsi->vsi_num, vsi->vsw->sw_id);
-
-	/* Update existing tunnels information */
-	udp_tunnel_get_rx_info(netdev);
 
 	return err;
 }
+109-70
drivers/net/ethernet/intel/ice/ice_ptp.c
···
 	pf = ptp_port_to_pf(ptp_port);
 	hw = &pf->hw;
 
+	if (!tx->init)
+		return;
+
 	/* Read the Tx ready status first */
 	if (tx->has_ready_bitmap) {
 		err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready);
···
 	pf->ptp.tx_hwtstamp_good += tstamp_good;
 }
 
-/**
- * ice_ptp_tx_tstamp_owner - Process Tx timestamps for all ports on the device
- * @pf: Board private structure
- */
-static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
+static void ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
 {
 	struct ice_ptp_port *port;
-	unsigned int i;
 
 	mutex_lock(&pf->adapter->ports.lock);
 	list_for_each_entry(port, &pf->adapter->ports.ports, list_node) {
···
 		ice_ptp_process_tx_tstamp(tx);
 	}
 	mutex_unlock(&pf->adapter->ports.lock);
-
-	for (i = 0; i < ICE_GET_QUAD_NUM(pf->hw.ptp.num_lports); i++) {
-		u64 tstamp_ready;
-		int err;
-
-		/* Read the Tx ready status first */
-		err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
-		if (err)
-			break;
-		else if (tstamp_ready)
-			return ICE_TX_TSTAMP_WORK_PENDING;
-	}
-
-	return ICE_TX_TSTAMP_WORK_DONE;
-}
-
-/**
- * ice_ptp_tx_tstamp - Process Tx timestamps for this function.
- * @tx: Tx tracking structure to initialize
- *
- * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding incomplete
- * Tx timestamps, or ICE_TX_TSTAMP_WORK_DONE otherwise.
- */
-static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
-{
-	bool more_timestamps;
-	unsigned long flags;
-
-	if (!tx->init)
-		return ICE_TX_TSTAMP_WORK_DONE;
-
-	/* Process the Tx timestamp tracker */
-	ice_ptp_process_tx_tstamp(tx);
-
-	/* Check if there are outstanding Tx timestamps */
-	spin_lock_irqsave(&tx->lock, flags);
-	more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len);
-	spin_unlock_irqrestore(&tx->lock, flags);
-
-	if (more_timestamps)
-		return ICE_TX_TSTAMP_WORK_PENDING;
-
-	return ICE_TX_TSTAMP_WORK_DONE;
-}
 
 /**
···
 		/* Do not reconfigure E810 or E830 PHY */
 		return;
 	case ICE_MAC_GENERIC:
-	case ICE_MAC_GENERIC_3K_E825:
 		ice_ptp_port_phy_restart(ptp_port);
+		return;
+	case ICE_MAC_GENERIC_3K_E825:
+		if (linkup)
+			ice_ptp_port_phy_restart(ptp_port);
 		return;
 	default:
 		dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__);
···
 	return idx + tx->offset;
 }
 
-/**
- * ice_ptp_process_ts - Process the PTP Tx timestamps
- * @pf: Board private structure
- *
- * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding Tx
- * timestamps that need processing, and ICE_TX_TSTAMP_WORK_DONE otherwise.
- */
-enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
+void ice_ptp_process_ts(struct ice_pf *pf)
 {
 	switch (pf->ptp.tx_interrupt_mode) {
 	case ICE_PTP_TX_INTERRUPT_NONE:
 		/* This device has the clock owner handle timestamps for it */
-		return ICE_TX_TSTAMP_WORK_DONE;
+		return;
 	case ICE_PTP_TX_INTERRUPT_SELF:
 		/* This device handles its own timestamps */
-		return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
+		ice_ptp_process_tx_tstamp(&pf->ptp.port.tx);
+		return;
 	case ICE_PTP_TX_INTERRUPT_ALL:
 		/* This device handles timestamps for all ports */
-		return ice_ptp_tx_tstamp_owner(pf);
+		ice_ptp_tx_tstamp_owner(pf);
+		return;
 	default:
 		WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n",
 			  pf->ptp.tx_interrupt_mode);
-		return ICE_TX_TSTAMP_WORK_DONE;
+		return;
 	}
+}
+
+static bool ice_port_has_timestamps(struct ice_ptp_tx *tx)
+{
+	bool more_timestamps;
+
+	scoped_guard(spinlock_irqsave, &tx->lock) {
+		if (!tx->init)
+			return false;
+
+		more_timestamps = !bitmap_empty(tx->in_use, tx->len);
+	}
+
+	return more_timestamps;
+}
+
+static bool ice_any_port_has_timestamps(struct ice_pf *pf)
+{
+	struct ice_ptp_port *port;
+
+	scoped_guard(mutex, &pf->adapter->ports.lock) {
+		list_for_each_entry(port, &pf->adapter->ports.ports,
+				    list_node) {
+			struct ice_ptp_tx *tx = &port->tx;
+
+			if (ice_port_has_timestamps(tx))
+				return true;
+		}
+	}
+
+	return false;
+}
+
+bool ice_ptp_tx_tstamps_pending(struct ice_pf *pf)
+{
+	struct ice_hw *hw = &pf->hw;
+	unsigned int i;
+
+	/* Check software indicator */
+	switch (pf->ptp.tx_interrupt_mode) {
+	case ICE_PTP_TX_INTERRUPT_NONE:
+		return false;
+	case ICE_PTP_TX_INTERRUPT_SELF:
+		if (ice_port_has_timestamps(&pf->ptp.port.tx))
+			return true;
+		break;
+	case ICE_PTP_TX_INTERRUPT_ALL:
+		if (ice_any_port_has_timestamps(pf))
+			return true;
+		break;
+	default:
+		WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n",
+			  pf->ptp.tx_interrupt_mode);
+		break;
+	}
+
+	/* Check hardware indicator */
+	for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) {
+		u64 tstamp_ready = 0;
+		int err;
+
+		err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
+		if (err || tstamp_ready)
+			return true;
+	}
+
+	return false;
 }
 
 /**
···
 		return IRQ_WAKE_THREAD;
 	case ICE_MAC_E830:
 		/* E830 can read timestamps in the top half using rd32() */
-		if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
+		ice_ptp_process_ts(pf);
+
+		if (ice_ptp_tx_tstamps_pending(pf)) {
 			/* Process outstanding Tx timestamps. If there
 			 * is more work, re-arm the interrupt to trigger again.
 			 */
···
 }
 
 /**
+ * ice_ptp_queue_work - Queue PTP periodic work for a PF
+ * @pf: Board private structure
+ *
+ * Helper function to queue PTP periodic work after VSI rebuild completes.
+ * This ensures that PTP work only runs when VSI structures are ready.
+ */
+void ice_ptp_queue_work(struct ice_pf *pf)
+{
+	if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags) &&
+	    pf->ptp.state == ICE_PTP_READY)
+		kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work, 0);
+}
+
+/**
  * ice_ptp_prepare_rebuild_sec - Prepare second NAC for PTP reset or rebuild
  * @pf: Board private structure
  * @rebuild: rebuild if true, prepare if false
···
 		struct ice_pf *peer_pf = ptp_port_to_pf(port);
 
 		if (!ice_is_primary(&peer_pf->hw)) {
-			if (rebuild)
+			if (rebuild) {
+				/* TODO: When implementing rebuild=true:
+				 * 1. Ensure secondary PFs' VSIs are rebuilt
+				 * 2. Call ice_ptp_queue_work(peer_pf) after VSI rebuild
+				 */
 				ice_ptp_rebuild(peer_pf, reset_type);
-			else
+			} else {
 				ice_ptp_prepare_for_reset(peer_pf, reset_type);
+			}
 		}
 	}
 }
···
 	}
 
 	ptp->state = ICE_PTP_READY;
-
-	/* Start periodic work going */
-	kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0);
 
 	dev_info(ice_pf_to_dev(pf), "PTP reset successful\n");
 	return;
···
 {
 	switch (pf->hw.mac_type) {
 	case ICE_MAC_GENERIC:
-		/* E822 based PHY has the clock owner process the interrupt
-		 * for all ports.
+	case ICE_MAC_GENERIC_3K_E825:
+		/* E82x hardware has the clock owner process timestamps for
+		 * all ports.
 		 */
 		if (ice_pf_src_tmr_owned(pf))
 			pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL;
···
 	/* the macvlan port may be freed by macvlan_uninit when fail to register.
 	 * so we destroy the macvlan port only when it's valid.
 	 */
-	if (create && macvlan_port_get_rtnl(lowerdev)) {
+	if (macvlan_port_get_rtnl(lowerdev)) {
 		macvlan_flush_sources(port, vlan);
-		macvlan_port_destroy(port->dev);
+		if (create)
+			macvlan_port_destroy(port->dev);
 	}
 	return err;
 }
···
 		usb_submit_urb(tp->intr_urb, GFP_NOIO);
 	}
 
-	/* If the device is RTL8152_INACCESSIBLE here then we should do a
-	 * reset. This is important because the usb_lock_device_for_reset()
-	 * that happens as a result of usb_queue_reset_device() will silently
-	 * fail if the device was suspended or if too much time passed.
-	 *
-	 * NOTE: The device is locked here so we can directly do the reset.
-	 * We don't need usb_lock_device_for_reset() because that's just a
-	 * wrapper over device_lock() and device_resume() (which calls us)
-	 * does that for us.
-	 */
-	if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
-		usb_reset_device(tp->udev);
-
 	return 0;
 }
···
 static int rtl8152_resume(struct usb_interface *intf)
 {
 	struct r8152 *tp = usb_get_intfdata(intf);
+	bool runtime_resume = test_bit(SELECTIVE_SUSPEND, &tp->flags);
 	int ret;
 
 	mutex_lock(&tp->control);
 
 	rtl_reset_ocp_base(tp);
 
-	if (test_bit(SELECTIVE_SUSPEND, &tp->flags))
+	if (runtime_resume)
 		ret = rtl8152_runtime_resume(tp);
 	else
 		ret = rtl8152_system_resume(tp);
 
 	mutex_unlock(&tp->control);
+
+	/* If the device is RTL8152_INACCESSIBLE here then we should do a
+	 * reset. This is important because the usb_lock_device_for_reset()
+	 * that happens as a result of usb_queue_reset_device() will silently
+	 * fail if the device was suspended or if too much time passed.
+	 *
+	 * NOTE: The device is locked here so we can directly do the reset.
+	 * We don't need usb_lock_device_for_reset() because that's just a
+	 * wrapper over device_lock() and device_resume() (which calls us)
+	 * does that for us.
+	 */
+	if (!runtime_resume && test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+		usb_reset_device(tp->udev);
 
 	return ret;
 }
···
 
 	of_genpd_del_provider(pdev->dev.of_node);
 
-	for (i = 0; bc->onecell_data.num_domains; i++) {
+	for (i = 0; i < bc->onecell_data.num_domains; i++) {
 		struct imx8m_blk_ctrl_domain *domain = &bc->domains[i];
 
 		pm_genpd_remove(&domain->genpd);
···
  */
 bool qcom_smem_is_available(void)
 {
-	return !!__smem;
+	return !IS_ERR(__smem);
 }
 EXPORT_SYMBOL_GPL(qcom_smem_is_available);
···
 {
 	platform_device_unregister(__smem->socinfo);
 
-	__smem = NULL;
+	/* Set to -EPROBE_DEFER to signal unprobed state */
+	__smem = ERR_PTR(-EPROBE_DEFER);
 }
 
 static const struct of_device_id qcom_smem_of_match[] = {
+3
drivers/spi/spi-tegra114.c
···
 	if (spi_get_csgpiod(spi, 0))
 		gpiod_set_value(spi_get_csgpiod(spi, 0), 0);
 
+	/* Update default register to include CS polarity and SPI mode */
 	val = tspi->def_command1_reg;
 	if (spi->mode & SPI_CS_HIGH)
 		val &= ~SPI_CS_POL_INACTIVE(spi_get_chipselect(spi, 0));
 	else
 		val |= SPI_CS_POL_INACTIVE(spi_get_chipselect(spi, 0));
+	val &= ~SPI_CONTROL_MODE_MASK;
+	val |= SPI_MODE_SEL(spi->mode & 0x3);
 	tspi->def_command1_reg = val;
 	tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
 	spin_unlock_irqrestore(&tspi->lock, flags);
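Background for the added SPI_MODE_SEL() programming: the low two bits of spi->mode carry the standard clock settings (SPI_CPHA is bit 0, SPI_CPOL is bit 1), so `spi->mode & 0x3` is exactly the conventional SPI mode number 0-3. A small illustration (example_spi_mode_number() is hypothetical):

	#include <linux/spi/spi.h>

	/* spi->mode & 0x3 maps CPOL/CPHA to the usual mode numbers:
	 *   0: CPOL=0, CPHA=0           1: CPOL=0, CPHA=1 (SPI_CPHA)
	 *   2: CPOL=1, CPHA=0 (SPI_CPOL)  3: CPOL=1, CPHA=1 (both)
	 */
	static u8 example_spi_mode_number(const struct spi_device *spi)
	{
		return spi->mode & (SPI_CPOL | SPI_CPHA);	/* == spi->mode & 0x3 */
	}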
+4-2
drivers/spi/spi-tegra20-slink.c
···
 	reset_control_deassert(tspi->rst);
 
 	spi_irq = platform_get_irq(pdev, 0);
-	if (spi_irq < 0)
-		return spi_irq;
+	if (spi_irq < 0) {
+		ret = spi_irq;
+		goto exit_pm_put;
+	}
 	tspi->irq = spi_irq;
 	ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
 				   tegra_slink_isr_thread, IRQF_ONESHOT,
+52-4
drivers/spi/spi-tegra210-quad.c
···
 	u32 command1, command2, speed = t->speed_hz;
 	u8 bits_per_word = t->bits_per_word;
 	u32 tx_tap = 0, rx_tap = 0;
+	unsigned long flags;
 	int req_mode;
 
 	if (!has_acpi_companion(tqspi->dev) && speed != tqspi->cur_speed) {
···
 		tqspi->cur_speed = speed;
 	}
 
+	spin_lock_irqsave(&tqspi->lock, flags);
 	tqspi->cur_pos = 0;
 	tqspi->cur_rx_pos = 0;
 	tqspi->cur_tx_pos = 0;
 	tqspi->curr_xfer = t;
+	spin_unlock_irqrestore(&tqspi->lock, flags);
 
 	if (is_first_of_msg) {
 		tegra_qspi_mask_clear_irq(tqspi);
···
 	u32 address_value = 0;
 	u32 cmd_config = 0, addr_config = 0;
 	u8 cmd_value = 0, val = 0;
+	unsigned long flags;
 
 	/* Enable Combined sequence mode */
 	val = tegra_qspi_readl(tqspi, QSPI_GLOBAL_CONFIG);
···
 			tegra_qspi_transfer_end(spi);
 			spi_transfer_delay_exec(xfer);
 		}
+		spin_lock_irqsave(&tqspi->lock, flags);
 		tqspi->curr_xfer = NULL;
+		spin_unlock_irqrestore(&tqspi->lock, flags);
 		transfer_phase++;
 	}
 	ret = 0;
 
 exit:
+	spin_lock_irqsave(&tqspi->lock, flags);
 	tqspi->curr_xfer = NULL;
+	spin_unlock_irqrestore(&tqspi->lock, flags);
 	msg->status = ret;
 
 	return ret;
···
 	struct spi_transfer *transfer;
 	bool is_first_msg = true;
 	int ret = 0, val = 0;
+	unsigned long flags;
 
 	msg->status = 0;
 	msg->actual_length = 0;
···
 		msg->actual_length += xfer->len + dummy_bytes;
 
 complete_xfer:
+		spin_lock_irqsave(&tqspi->lock, flags);
 		tqspi->curr_xfer = NULL;
+		spin_unlock_irqrestore(&tqspi->lock, flags);
 
 		if (ret < 0) {
 			tegra_qspi_transfer_end(spi);
···
 
 static irqreturn_t handle_cpu_based_xfer(struct tegra_qspi *tqspi)
 {
-	struct spi_transfer *t = tqspi->curr_xfer;
+	struct spi_transfer *t;
 	unsigned long flags;
 
 	spin_lock_irqsave(&tqspi->lock, flags);
+	t = tqspi->curr_xfer;
+
+	if (!t) {
+		spin_unlock_irqrestore(&tqspi->lock, flags);
+		return IRQ_HANDLED;
+	}
 
 	if (tqspi->tx_status || tqspi->rx_status) {
 		tegra_qspi_handle_error(tqspi);
···
 
 static irqreturn_t handle_dma_based_xfer(struct tegra_qspi *tqspi)
 {
-	struct spi_transfer *t = tqspi->curr_xfer;
+	struct spi_transfer *t;
 	unsigned int total_fifo_words;
 	unsigned long flags;
 	long wait_status;
···
 	}
 
 	spin_lock_irqsave(&tqspi->lock, flags);
+	t = tqspi->curr_xfer;
+
+	if (!t) {
+		spin_unlock_irqrestore(&tqspi->lock, flags);
+		return IRQ_HANDLED;
+	}
 
 	if (num_errors) {
 		tegra_qspi_dma_unmap_xfer(tqspi, t);
···
 static irqreturn_t tegra_qspi_isr_thread(int irq, void *context_data)
 {
 	struct tegra_qspi *tqspi = context_data;
+	unsigned long flags;
+	u32 status;
+
+	/*
+	 * Read transfer status to check if interrupt was triggered by transfer
+	 * completion
+	 */
+	status = tegra_qspi_readl(tqspi, QSPI_TRANS_STATUS);
 
 	/*
 	 * Occasionally the IRQ thread takes a long time to wake up (usually
 	 * when the CPU that it's running on is excessively busy) and we have
 	 * already reached the timeout before and cleaned up the timed out
 	 * transfer. Avoid any processing in that case and bail out early.
+	 *
+	 * If no transfer is in progress, check if this was a real interrupt
+	 * that the timeout handler already processed, or a spurious one.
 	 */
-	if (!tqspi->curr_xfer)
-		return IRQ_NONE;
+	spin_lock_irqsave(&tqspi->lock, flags);
+	if (!tqspi->curr_xfer) {
+		spin_unlock_irqrestore(&tqspi->lock, flags);
+		/* Spurious interrupt - transfer not ready */
+		if (!(status & QSPI_RDY))
+			return IRQ_NONE;
+		/* Real interrupt, already handled by timeout path */
+		return IRQ_HANDLED;
+	}
 
 	tqspi->status_reg = tegra_qspi_readl(tqspi, QSPI_FIFO_STATUS);
···
 	tqspi->rx_status = tqspi->status_reg & (QSPI_RX_FIFO_OVF | QSPI_RX_FIFO_UNF);
 
 	tegra_qspi_mask_clear_irq(tqspi);
+	spin_unlock_irqrestore(&tqspi->lock, flags);
 
+	/*
+	 * Lock is released here but handlers safely re-check curr_xfer under
+	 * lock before dereferencing.
+	 * DMA handler also needs to sleep in wait_for_completion_*(), which
+	 * cannot be done while holding spinlock.
+	 */
 	if (!tqspi->is_curr_dma_xfer)
 		return handle_cpu_based_xfer(tqspi);
 
+49-53
drivers/usb/gadget/function/f_fs.c
···
 	__attribute__((malloc));
 
 /* Opened counter handling. */
-static void ffs_data_opened(struct ffs_data *ffs);
 static void ffs_data_closed(struct ffs_data *ffs);
 
 /* Called with ffs->mutex held; take over ownership of data. */
···
 	return ret;
 }
 
+
+static void ffs_data_reset(struct ffs_data *ffs);
+
 static int ffs_ep0_open(struct inode *inode, struct file *file)
 {
 	struct ffs_data *ffs = inode->i_sb->s_fs_info;
-	int ret;
 
-	/* Acquire mutex */
-	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
-	if (ret < 0)
-		return ret;
-
-	ffs_data_opened(ffs);
+	spin_lock_irq(&ffs->eps_lock);
 	if (ffs->state == FFS_CLOSING) {
-		ffs_data_closed(ffs);
-		mutex_unlock(&ffs->mutex);
+		spin_unlock_irq(&ffs->eps_lock);
 		return -EBUSY;
 	}
-	mutex_unlock(&ffs->mutex);
+	if (!ffs->opened++ && ffs->state == FFS_DEACTIVATED) {
+		ffs->state = FFS_CLOSING;
+		spin_unlock_irq(&ffs->eps_lock);
+		ffs_data_reset(ffs);
+	} else {
+		spin_unlock_irq(&ffs->eps_lock);
+	}
 	file->private_data = ffs;
 
 	return stream_open(inode, file);
···
 {
 	struct ffs_data *ffs = inode->i_sb->s_fs_info;
 	struct ffs_epfile *epfile;
-	int ret;
 
-	/* Acquire mutex */
-	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
-	if (ret < 0)
-		return ret;
-
-	if (!atomic_inc_not_zero(&ffs->opened)) {
-		mutex_unlock(&ffs->mutex);
+	spin_lock_irq(&ffs->eps_lock);
+	if (!ffs->opened) {
+		spin_unlock_irq(&ffs->eps_lock);
 		return -ENODEV;
 	}
 	/*
···
 	 */
 	epfile = smp_load_acquire(&inode->i_private);
 	if (unlikely(ffs->state != FFS_ACTIVE || !epfile)) {
-		mutex_unlock(&ffs->mutex);
-		ffs_data_closed(ffs);
+		spin_unlock_irq(&ffs->eps_lock);
 		return -ENODEV;
 	}
-	mutex_unlock(&ffs->mutex);
+	ffs->opened++;
+	spin_unlock_irq(&ffs->eps_lock);
 
 	file->private_data = epfile;
 	return stream_open(inode, file);
···
 	return 0;
 }
 
-static void ffs_data_reset(struct ffs_data *ffs);
-
 static void
 ffs_fs_kill_sb(struct super_block *sb)
 {
···
 	refcount_inc(&ffs->ref);
 }
 
-static void ffs_data_opened(struct ffs_data *ffs)
-{
-	if (atomic_add_return(1, &ffs->opened) == 1 &&
-	    ffs->state == FFS_DEACTIVATED) {
-		ffs->state = FFS_CLOSING;
-		ffs_data_reset(ffs);
-	}
-}
-
 static void ffs_data_put(struct ffs_data *ffs)
 {
 	if (refcount_dec_and_test(&ffs->ref)) {
···
 
 static void ffs_data_closed(struct ffs_data *ffs)
 {
-	if (atomic_dec_and_test(&ffs->opened)) {
-		if (ffs->no_disconnect) {
-			struct ffs_epfile *epfiles;
-			unsigned long flags;
+	spin_lock_irq(&ffs->eps_lock);
+	if (--ffs->opened) { // not the last opener?
+		spin_unlock_irq(&ffs->eps_lock);
+		return;
+	}
+	if (ffs->no_disconnect) {
+		struct ffs_epfile *epfiles;
 
-			ffs->state = FFS_DEACTIVATED;
-			spin_lock_irqsave(&ffs->eps_lock, flags);
-			epfiles = ffs->epfiles;
-			ffs->epfiles = NULL;
-			spin_unlock_irqrestore(&ffs->eps_lock,
-					       flags);
+		ffs->state = FFS_DEACTIVATED;
+		epfiles = ffs->epfiles;
+		ffs->epfiles = NULL;
+		spin_unlock_irq(&ffs->eps_lock);
 
-			if (epfiles)
-				ffs_epfiles_destroy(ffs->sb, epfiles,
-						    ffs->eps_count);
+		if (epfiles)
+			ffs_epfiles_destroy(ffs->sb, epfiles,
+					    ffs->eps_count);
 
-			if (ffs->setup_state == FFS_SETUP_PENDING)
-				__ffs_ep0_stall(ffs);
-		} else {
-			ffs->state = FFS_CLOSING;
-			ffs_data_reset(ffs);
-		}
+		if (ffs->setup_state == FFS_SETUP_PENDING)
+			__ffs_ep0_stall(ffs);
+	} else {
+		ffs->state = FFS_CLOSING;
+		spin_unlock_irq(&ffs->eps_lock);
+		ffs_data_reset(ffs);
 	}
 }
···
 	}
 
 	refcount_set(&ffs->ref, 1);
-	atomic_set(&ffs->opened, 0);
+	ffs->opened = 0;
 	ffs->state = FFS_READ_DESCRIPTORS;
 	mutex_init(&ffs->mutex);
 	spin_lock_init(&ffs->eps_lock);
···
 {
 	ffs_data_clear(ffs);
 
+	spin_lock_irq(&ffs->eps_lock);
 	ffs->raw_descs_data = NULL;
 	ffs->raw_descs = NULL;
 	ffs->raw_strings = NULL;
···
 	ffs->ms_os_descs_ext_prop_count = 0;
 	ffs->ms_os_descs_ext_prop_name_len = 0;
 	ffs->ms_os_descs_ext_prop_data_len = 0;
+	spin_unlock_irq(&ffs->eps_lock);
 }
 
 
···
 {
 	struct ffs_function *func = ffs_func_from_usb(f);
 	struct ffs_data *ffs = func->ffs;
+	unsigned long flags;
 	int ret = 0, intf;
 
 	if (alt > MAX_ALT_SETTINGS)
···
 	if (ffs->func)
 		ffs_func_eps_disable(ffs->func);
 
+	spin_lock_irqsave(&ffs->eps_lock, flags);
 	if (ffs->state == FFS_DEACTIVATED) {
 		ffs->state = FFS_CLOSING;
+		spin_unlock_irqrestore(&ffs->eps_lock, flags);
 		INIT_WORK(&ffs->reset_work, ffs_reset_work);
 		schedule_work(&ffs->reset_work);
 		return -ENODEV;
 	}
+	spin_unlock_irqrestore(&ffs->eps_lock, flags);
 
 	if (ffs->state != FFS_ACTIVE)
 		return -ENODEV;
···
 {
 	struct ffs_function *func = ffs_func_from_usb(f);
 	struct ffs_data *ffs = func->ffs;
+	unsigned long flags;
 
 	if (ffs->func)
 		ffs_func_eps_disable(ffs->func);
 
+	spin_lock_irqsave(&ffs->eps_lock, flags);
 	if (ffs->state == FFS_DEACTIVATED) {
 		ffs->state = FFS_CLOSING;
+		spin_unlock_irqrestore(&ffs->eps_lock, flags);
 		INIT_WORK(&ffs->reset_work, ffs_reset_work);
 		schedule_work(&ffs->reset_work);
 		return;
 	}
+	spin_unlock_irqrestore(&ffs->eps_lock, flags);
 
 	if (ffs->state == FFS_ACTIVE) {
 		ffs->func = NULL;
+1-1
drivers/usb/gadget/function/u_fs.h
···
 	/* reference counter */
 	refcount_t ref;
 	/* how many files are opened (EP0 and others) */
-	atomic_t opened;
+	int opened;
 
 	/* EP0 state */
 	enum ffs_state state;
···3131#define CEPH_INO_CEPH 2 /* hidden .ceph dir */3232#define CEPH_INO_GLOBAL_SNAPREALM 3 /* global dummy snaprealm */33333434+/*3535+ * name for "old" CephFS file systems,3636+ * see ceph.git e2b151d009640114b2565c901d6f41f6cd5ec6523737+ */3838+#define CEPH_OLD_FS_NAME "cephfs"3939+3440/* arbitrary limit on max # of monitors (cluster of 3 is typical) */3541#define CEPH_MAX_MON 313642
···2626/**2727 * struct pci_ide_partner - Per port pair Selective IDE Stream settings2828 * @rid_start: Partner Port Requester ID range start2929- * @rid_end: Partner Port Requester ID range end2929+ * @rid_end: Partner Port Requester ID range end (inclusive)3030 * @stream_index: Selective IDE Stream Register Block selection3131 * @mem_assoc: PCI bus memory address association for targeting peer partner3232 * @pref_assoc: PCI bus prefetchable memory address association for···8282 * @host_bridge_stream: allocated from host bridge @ide_stream_ida pool8383 * @stream_id: unique Stream ID (within Partner Port pairing)8484 * @name: name of the established Selective IDE Stream in sysfs8585- * @tsm_dev: For TSM established IDE, the TSM device context8685 *8786 * Negative @stream_id values indicate "uninitialized" on the8887 * expectation that with TSM established IDE the TSM owns the stream_id···9394 u8 host_bridge_stream;9495 int stream_id;9596 const char *name;9696- struct tsm_dev *tsm_dev;9797};98989999/*
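With @rid_end now documented as inclusive, any range test against the partner settings has to compare the upper bound with <=. A hypothetical helper illustrating the semantics (not part of the patch, and the rid type is an assumption):

static bool pci_ide_rid_match(const struct pci_ide_partner *settings, u32 rid)
{
        /* @rid_end is inclusive, so the comparison is <=, not < */
        return rid >= settings->rid_start && rid <= settings->rid_end;
}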
+2-4
include/linux/rseq_types.h
···121121/**122122 * struct mm_mm_cid - Storage for per MM CID data123123 * @pcpu: Per CPU storage for CIDs associated to a CPU124124- * @percpu: Set, when CIDs are in per CPU mode125125- * @transit: Set to MM_CID_TRANSIT during a mode change transition phase124124+ * @mode: Indicates per CPU and transition mode126125 * @max_cids: The exclusive maximum CID value for allocation and convergence127126 * @irq_work: irq_work to handle the affinity mode change case128127 * @work: Regular work to handle the affinity mode change case···138139struct mm_mm_cid {139140 /* Hotpath read mostly members */140141 struct mm_cid_pcpu __percpu *pcpu;141141- unsigned int percpu;142142- unsigned int transit;142142+ unsigned int mode;143143 unsigned int max_cids;144144145145 /* Rarely used. Moves @lock and @mutex into the second cacheline */
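Folding the separate percpu and transit words into a single mode word lets both properties be published with one store and consumed with one load. A runnable userspace sketch of the encoding; the bit values are placeholders, not the kernel's MM_CID_* definitions:

#include <stdio.h>

#define MM_CID_ONCPU    0x80000000u     /* placeholder bit values */
#define MM_CID_TRANSIT  0x40000000u

static int cid_on_cpu(unsigned int mode)     { return !!(mode & MM_CID_ONCPU); }
static int cid_in_transit(unsigned int mode) { return !!(mode & MM_CID_TRANSIT); }

int main(void)
{
        /* the four states spelled out in the sched/core.c comment below */
        unsigned int modes[] = {
                0,                              /* per task ownership */
                MM_CID_TRANSIT,                 /* CPU -> task transition */
                MM_CID_ONCPU,                   /* per CPU ownership */
                MM_CID_ONCPU | MM_CID_TRANSIT,  /* task -> CPU transition */
        };

        for (unsigned int i = 0; i < 4; i++)
                printf("mode=%08x oncpu=%d transit=%d\n", modes[i],
                       cid_on_cpu(modes[i]), cid_in_transit(modes[i]));
        return 0;
}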
+12
include/linux/skbuff.h
···43014301 skb_headlen(skb), buffer);43024302}4303430343044304+/* Variant of skb_header_pointer() where @offset is user-controlled43054305+ * and potentially negative.43064306+ */43074307+static inline void * __must_check43084308+skb_header_pointer_careful(const struct sk_buff *skb, int offset,43094309+ int len, void *buffer)43104310+{43114311+ if (unlikely(offset < 0 && -offset > skb_headroom(skb)))43124312+ return NULL;43134313+ return skb_header_pointer(skb, offset, len, buffer);43144314+}43154315+43044316static inline void * __must_check43054317skb_pointer_if_linear(const struct sk_buff *skb, int offset, int len)43064318{
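A hypothetical caller of the checked variant, assuming @off arrives from user-controlled packet metadata and may legitimately point backwards into the headroom (vh, vbuf and off are illustrative, not from the patch):

        struct vlan_hdr vbuf;
        const struct vlan_hdr *vh;

        vh = skb_header_pointer_careful(skb, off, sizeof(vbuf), &vbuf);
        if (!vh)
                return -EINVAL; /* off is beyond the headroom or past the data */

A plain skb_header_pointer() call with a negative offset larger than the headroom would compute a pointer before the buffer instead of failing cleanly, which is the case the new helper rejects.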
···1026910269 * Serialization rules:1027010270 *1027110271 * mm::mm_cid::mutex: Serializes fork() and exit() and therefore1027210272- * protects mm::mm_cid::users.1027210272+ * protects mm::mm_cid::users and mode switch1027310273+ * transitions1027310274 *1027410275 * mm::mm_cid::lock: Serializes mm_update_max_cids() and1027510276 * mm_update_cpus_allowed(). Nests in mm_cid::mutex···1028610285 *1028710286 * A CID is either owned by a task (stored in task_struct::mm_cid.cid) or1028810287 * by a CPU (stored in mm::mm_cid.pcpu::cid). CIDs owned by CPUs have the1028910289- * MM_CID_ONCPU bit set. During transition from CPU to task ownership mode,1029010290- * MM_CID_TRANSIT is set on the per task CIDs. When this bit is set the1029110291- * task needs to drop the CID into the pool when scheduling out. Both bits1029210292- * (ONCPU and TRANSIT) are filtered out by task_cid() when the CID is1029310293- * actually handed over to user space in the RSEQ memory.1028810288+ * MM_CID_ONCPU bit set.1028910289+ *1029010290+ * During the transition of ownership mode, the MM_CID_TRANSIT bit is set1029110291+ * on the CIDs. When this bit is set the tasks drop the CID back into the1029210292+ * pool when scheduling out.1029310293+ *1029410294+ * Both bits (ONCPU and TRANSIT) are filtered out by task_cid() when the1029510295+ * CID is actually handed over to user space in the RSEQ memory.1029410296 *1029510297 * Mode switching:1029810298+ *1029910299+ * The ownership mode is per process and stored in mm:mm_cid::mode with the1030010300+ * following possible states:1030110301+ *1030210302+ * 0: Per task ownership1030310303+ * 0 | MM_CID_TRANSIT: Transition from per CPU to per task1030410304+ * MM_CID_ONCPU: Per CPU ownership1030510305+ * MM_CID_ONCPU | MM_CID_TRANSIT: Transition from per task to per CPU1030610306+ *1030710307+ * All transitions of ownership mode happen in two phases:1030810308+ *1030910309+ * 1) mm:mm_cid::mode has the MM_CID_TRANSIT bit set. This is OR'ed on the1031010310+ * CIDs and denotes that the CID is only temporarily owned by a1031110311+ * task. When the task schedules out it drops the CID back into the1031210312+ * pool if this bit is set.1031310313+ *1031410314+ * 2) The initiating context walks the per CPU space or the tasks to fixup1031510315+ * or drop the CIDs and after completion it clears MM_CID_TRANSIT in1031610316+ * mm:mm_cid::mode. After that point the CIDs are strictly task or CPU1031710317+ * owned again.1031810318+ *1031910319+ * This two phase transition is required to prevent CID space exhaustion1032010320+ * during the transition as a direct transfer of ownership would fail:1032110321+ *1032210322+ * - On task to CPU mode switch if a task is scheduled in on one CPU and1032310323+ * then migrated to another CPU before the fixup freed enough per task1032410324+ * CIDs.1032510325+ *1032610326+ * - On CPU to task mode switch if two tasks are scheduled in on the same1032710327+ * CPU before the fixup freed per CPU CIDs.1032810328+ *1032910329+ * Both scenarios can result in a live lock because sched_in() is invoked1033010330+ * with runqueue lock held and loops in search of a CID and the fixup1033110331+ * thread can't make progress freeing them up because it is stuck on the1033210332+ * same runqueue lock.1033310333+ *1033410334+ * While MM_CID_TRANSIT is active during the transition phase the MM_CID1033510335+ * bitmap can be contended, but that's a temporary contention bound to the1033610336+ * transition period. 
After that everything goes back into steady state and1033710337+ * nothing except fork() and exit() will touch the bitmap. This is an1033810338+ * acceptable tradeoff as it completely avoids complex serialization,1033910339+ * memory barriers and atomic operations for the common case.1034010340+ *1034110341+ * Aside from that, this mechanism also ensures RT compatibility:1034210342+ *1034310343+ * - The task which runs the fixup is fully preemptible except for the1034410344+ * short runqueue lock held sections.1034510345+ *1034610346+ * - The transient impact of the bitmap contention is only problematic1034710347+ * when there is a thundering herd scenario of tasks scheduling in and1034810348+ * out concurrently. There is not much which can be done about that1034910349+ * except for avoiding mode switching by a proper overall system1035010350+ * configuration.1029610351 *1029710352 * Switching to per CPU mode happens when the user count becomes greater1029810353 * than the maximum number of CIDs, which is calculated by:···1036310306 *1036410307 * At the point of switching to per CPU mode the new user is not yet1036510308 * visible in the system, so the task which initiated the fork() runs the1036610366- * fixup function: mm_cid_fixup_tasks_to_cpu() walks the thread list and1036710367- * either transfers each tasks owned CID to the CPU the task runs on or1036810368- * drops it into the CID pool if a task is not on a CPU at that point in1036910369- * time. Tasks which schedule in before the task walk reaches them do the1037010370- * handover in mm_cid_schedin(). When mm_cid_fixup_tasks_to_cpus() completes1037110371- * it's guaranteed that no task related to that MM owns a CID anymore.1030910309+ * fixup function. mm_cid_fixup_tasks_to_cpu() walks the thread list and1031010310+ * either marks each task owned CID with MM_CID_TRANSIT if the task is1031110311+ * running on a CPU or drops it into the CID pool if a task is not on a1031210312+ * CPU. Tasks which schedule in before the task walk reaches them do the1031310313+ * handover in mm_cid_schedin(). When mm_cid_fixup_tasks_to_cpus()1031410314+ * completes it is guaranteed that no task related to that MM owns a CID1031510315+ * anymore.1037210316 *1037310317 * Switching back to task mode happens when the user count goes below the1037410318 * threshold which was recorded on the per CPU mode switch:···1038510327 * run either in the deferred update function in context of a workqueue or1038610328 * by a task which forks a new one or by a task which exits. Whatever1038710329 * happens first. mm_cid_fixup_cpus_to_task() walks through the possible1038810388- * CPUs and either transfers the CPU owned CIDs to a related task which1038910389- * runs on the CPU or drops it into the pool. Tasks which schedule in on a1039010390- * CPU which the walk did not cover yet do the handover themself.1039110391- *1039210392- * This transition from CPU to per task ownership happens in two phases:1039310393- *1039410394- * 1) mm:mm_cid.transit contains MM_CID_TRANSIT This is OR'ed on the task1039510395- * CID and denotes that the CID is only temporarily owned by the1039610396- * task. When it schedules out the task drops the CID back into the1039710397- * pool if this bit is set.1039810398- *1039910399- * 2) The initiating context walks the per CPU space and after completion1040010400- * clears mm:mm_cid.transit. 
So after that point the CIDs are strictly1040110401- * task owned again.1040210402- *1040310403- * This two phase transition is required to prevent CID space exhaustion1040410404- * during the transition as a direct transfer of ownership would fail if1040510405- * two tasks are scheduled in on the same CPU before the fixup freed per1040610406- * CPU CIDs.1040710407- *1040810408- * When mm_cid_fixup_cpus_to_tasks() completes it's guaranteed that no CID1040910409- * related to that MM is owned by a CPU anymore.1033010330+ * CPUs and either marks the CPU owned CIDs with MM_CID_TRANSIT if a1033110331+ * related task is running on the CPU or drops it into the pool. Tasks1033210332+ * which are scheduled in before the fixup covered them do the handover1033310333+ * themself. When mm_cid_fixup_cpus_to_tasks() completes it is guaranteed1033410334+ * that no CID related to that MM is owned by a CPU anymore.1041010335 */10411103361041210337/*···1042010379static bool mm_update_max_cids(struct mm_struct *mm)1042110380{1042210381 struct mm_mm_cid *mc = &mm->mm_cid;1038210382+ bool percpu = cid_on_cpu(mc->mode);10423103831042410384 lockdep_assert_held(&mm->mm_cid.lock);1042510385···1042910387 __mm_update_max_cids(mc);10430103881043110389 /* Check whether owner mode must be changed */1043210432- if (!mc->percpu) {1039010390+ if (!percpu) {1043310391 /* Enable per CPU mode when the number of users is above max_cids */1043410392 if (mc->users > mc->max_cids)1043510393 mc->pcpu_thrs = mm_cid_calc_pcpu_thrs(mc);···1044010398 }10441103991044210400 /* Mode change required? */1044310443- if (!!mc->percpu == !!mc->pcpu_thrs)1040110401+ if (percpu == !!mc->pcpu_thrs)1044410402 return false;1044510445- /* When switching back to per TASK mode, set the transition flag */1044610446- if (!mc->pcpu_thrs)1044710447- WRITE_ONCE(mc->transit, MM_CID_TRANSIT);1044810448- WRITE_ONCE(mc->percpu, !!mc->pcpu_thrs);1040310403+1040410404+ /* Flip the mode and set the transition flag to bridge the transfer */1040510405+ WRITE_ONCE(mc->mode, mc->mode ^ (MM_CID_TRANSIT | MM_CID_ONCPU));1040610406+ /*1040710407+ * Order the store against the subsequent fixups so that1040810408+ * acquire(rq::lock) cannot be reordered by the CPU before the1040910409+ * store.1041010410+ */1041110411+ smp_mb();1044910412 return true;1045010413}1045110414···10475104281047610429 WRITE_ONCE(mc->nr_cpus_allowed, weight);1047710430 __mm_update_max_cids(mc);1047810478- if (!mc->percpu)1043110431+ if (!cid_on_cpu(mc->mode))1047910432 return;10480104331048110434 /* Adjust the threshold to the wider set */···1049110444 /* Queue the irq work, which schedules the real work */1049210445 mc->update_deferred = true;1049310446 irq_work_queue(&mc->irq_work);1044710447+}1044810448+1044910449+static inline void mm_cid_complete_transit(struct mm_struct *mm, unsigned int mode)1045010450+{1045110451+ /*1045210452+ * Ensure that the store removing the TRANSIT bit cannot be1045310453+ * reordered by the CPU before the fixups have been completed.1045410454+ */1045510455+ smp_mb();1045610456+ WRITE_ONCE(mm->mm_cid.mode, mode);1049410457}10495104581049610459static inline void mm_cid_transit_to_task(struct task_struct *t, struct mm_cid_pcpu *pcp)···1054610489 }1054710490 }1054810491 }1054910549- /* Clear the transition bit */1055010550- WRITE_ONCE(mm->mm_cid.transit, 0);1049210492+ mm_cid_complete_transit(mm, 0);1055110493}10552104941055310553-static inline void mm_cid_transfer_to_cpu(struct task_struct *t, struct mm_cid_pcpu *pcp)1049510495+static inline void 
mm_cid_transit_to_cpu(struct task_struct *t, struct mm_cid_pcpu *pcp)1055410496{1055510497 if (cid_on_task(t->mm_cid.cid)) {1055610556- t->mm_cid.cid = cid_to_cpu_cid(t->mm_cid.cid);1049810498+ t->mm_cid.cid = cid_to_transit_cid(t->mm_cid.cid);1055710499 pcp->cid = t->mm_cid.cid;1055810500 }1055910501}···1056510509 if (!t->mm_cid.active)1056610510 return false;1056710511 if (cid_on_task(t->mm_cid.cid)) {1056810568- /* If running on the CPU, transfer the CID, otherwise drop it */1051210512+ /* If running on the CPU, put the CID in transit mode, otherwise drop it */1056910513 if (task_rq(t)->curr == t)1057010570- mm_cid_transfer_to_cpu(t, per_cpu_ptr(mm->mm_cid.pcpu, task_cpu(t)));1051410514+ mm_cid_transit_to_cpu(t, per_cpu_ptr(mm->mm_cid.pcpu, task_cpu(t)));1057110515 else1057210516 mm_unset_cid_on_task(t);1057310517 }1057410518 return true;1057510519}10576105201057710577-static void mm_cid_fixup_tasks_to_cpus(void)1052110521+static void mm_cid_do_fixup_tasks_to_cpus(struct mm_struct *mm)1057810522{1057910579- struct mm_struct *mm = current->mm;1058010523 struct task_struct *p, *t;1058110524 unsigned int users;1058210525···1061310558 }1061410559}10615105601056110561+static void mm_cid_fixup_tasks_to_cpus(void)1056210562+{1056310563+ struct mm_struct *mm = current->mm;1056410564+1056510565+ mm_cid_do_fixup_tasks_to_cpus(mm);1056610566+ mm_cid_complete_transit(mm, MM_CID_ONCPU);1056710567+}1056810568+1061610569static bool sched_mm_cid_add_user(struct task_struct *t, struct mm_struct *mm)1061710570{1061810571 t->mm_cid.active = 1;···1064910586 }10650105871065110588 if (!sched_mm_cid_add_user(t, mm)) {1065210652- if (!mm->mm_cid.percpu)1058910589+ if (!cid_on_cpu(mm->mm_cid.mode))1065310590 t->mm_cid.cid = mm_get_cid(mm);1065410591 return;1065510592 }10656105931065710594 /* Handle the mode change and transfer current's CID */1065810658- percpu = !!mm->mm_cid.percpu;1059510595+ percpu = cid_on_cpu(mm->mm_cid.mode);1065910596 if (!percpu)1066010597 mm_cid_transit_to_task(current, pcp);1066110598 else1066210662- mm_cid_transfer_to_cpu(current, pcp);1059910599+ mm_cid_transit_to_cpu(current, pcp);1066310600 }10664106011066510602 if (percpu) {···1069410631 * affinity change increased the number of allowed CPUs and the1069510632 * deferred fixup did not run yet.1069610633 */1069710697- if (WARN_ON_ONCE(mm->mm_cid.percpu))1063410634+ if (WARN_ON_ONCE(cid_on_cpu(mm->mm_cid.mode)))1069810635 return false;1069910636 /*1070010637 * A failed fork(2) cleanup never gets here, so @current must have···1072710664 scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) {1072810665 if (!__sched_mm_cid_exit(t))1072910666 return;1073010730- /* Mode change required. Transfer currents CID */1073110731- mm_cid_transit_to_task(current, this_cpu_ptr(mm->mm_cid.pcpu));1066710667+ /*1066810668+ * Mode change. The task has the CID unset1066910669+ * already. The CPU CID is still valid and1067010670+ * does not have MM_CID_TRANSIT set as the1067110671+ * mode change has just taken effect under1067210672+ * mm::mm_cid::lock. 
Drop it.1067310673+ */1067410674+ mm_drop_cid_on_cpu(mm, this_cpu_ptr(mm->mm_cid.pcpu));1073210675 }1073310676 mm_cid_fixup_cpus_to_tasks(mm);1073410677 return;···1079110722 if (!mm_update_max_cids(mm))1079210723 return;1079310724 /* Affinity changes can only switch back to task mode */1079410794- if (WARN_ON_ONCE(mm->mm_cid.percpu))1072510725+ if (WARN_ON_ONCE(cid_on_cpu(mm->mm_cid.mode)))1079510726 return;1079610727 }1079710728 mm_cid_fixup_cpus_to_tasks(mm);···1081210743void mm_init_cid(struct mm_struct *mm, struct task_struct *p)1081310744{1081410745 mm->mm_cid.max_cids = 0;1081510815- mm->mm_cid.percpu = 0;1081610816- mm->mm_cid.transit = 0;1074610746+ mm->mm_cid.mode = 0;1081710747 mm->mm_cid.nr_cpus_allowed = p->nr_cpus_allowed;1081810748 mm->mm_cid.users = 0;1081910749 mm->mm_cid.pcpu_thrs = 0;
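Condensed, the two-phase switch described in the big comment block boils down to the following sequence (a sketch with a stand-in function, not the kernel code, which flips the mode with an XOR and splits the phases across several helpers):

static void mode_switch(struct mm_struct *mm, bool to_percpu)
{
        unsigned int target = to_percpu ? MM_CID_ONCPU : 0;

        /*
         * Phase 1: publish the new mode with MM_CID_TRANSIT set. CIDs
         * handed out from now on are temporary and are dropped (or
         * converted) again when their task schedules out.
         */
        WRITE_ONCE(mm->mm_cid.mode, target | MM_CID_TRANSIT);
        smp_mb();       /* order the mode store before the fixup walk */

        /* ... walk the tasks or CPUs, marking or dropping stale CIDs ... */

        /*
         * Phase 2: fixups complete; clear MM_CID_TRANSIT so the CIDs
         * are strictly task or CPU owned again.
         */
        smp_mb();       /* order the fixup stores before clearing TRANSIT */
        WRITE_ONCE(mm->mm_cid.mode, target);
}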
+48
kernel/sched/ext.c
···194194#include <trace/events/sched_ext.h>195195196196static void process_ddsp_deferred_locals(struct rq *rq);197197+static bool task_dead_and_done(struct task_struct *p);197198static u32 reenq_local(struct rq *rq);198199static void scx_kick_cpu(struct scx_sched *sch, s32 cpu, u64 flags);199200static bool scx_vexit(struct scx_sched *sch, enum scx_exit_kind kind,···2620261926212620 set_cpus_allowed_common(p, ac);2622262126222622+ if (task_dead_and_done(p))26232623+ return;26242624+26232625 /*26242626 * The effective cpumask is stored in @p->cpus_ptr which may temporarily26252627 * differ from the configured one in @p->cpus_mask. Always tell the bpf···30383034 percpu_up_read(&scx_fork_rwsem);30393035}3040303630373037+/**30383038+ * task_dead_and_done - Is a task dead and done running?30393039+ * @p: target task30403040+ *30413041+ * Once sched_ext_dead() removes the dead task from scx_tasks and exits it, the30423042+ * task no longer exists from SCX's POV. However, certain sched_class ops may be30433043+ * invoked on these dead tasks leading to failures - e.g. sched_setscheduler()30443044+ * may try to switch a task which finished sched_ext_dead() back into SCX30453045+ * triggering invalid SCX task state transitions and worse.30463046+ *30473047+ * Once a task has finished the final switch, sched_ext_dead() is the only thing30483048+ * that needs to happen on the task. Use this test to short-circuit sched_class30493049+ * operations which may be called on dead tasks.30503050+ */30513051+static bool task_dead_and_done(struct task_struct *p)30523052+{30533053+ struct rq *rq = task_rq(p);30543054+30553055+ lockdep_assert_rq_held(rq);30563056+30573057+ /*30583058+ * In do_task_dead(), a dying task sets %TASK_DEAD with preemption30593059+ * disabled and __schedule(). If @p has %TASK_DEAD set and off CPU, @p30603060+ * won't ever run again.30613061+ */30623062+ return unlikely(READ_ONCE(p->__state) == TASK_DEAD) &&30633063+ !task_on_cpu(rq, p);30643064+}30653065+30413066void sched_ext_dead(struct task_struct *p)30423067{30433068 unsigned long flags;3044306930703070+ /*30713071+ * By the time control reaches here, @p has %TASK_DEAD set, switched out30723072+ * for the last time and then dropped the rq lock - task_dead_and_done()30733073+ * should be returning %true nullifying the straggling sched_class ops.30743074+ * Remove from scx_tasks and exit @p.30753075+ */30453076 raw_spin_lock_irqsave(&scx_tasks_lock, flags);30463077 list_del_init(&p->scx.tasks_node);30473078 raw_spin_unlock_irqrestore(&scx_tasks_lock, flags);···3102306331033064 lockdep_assert_rq_held(task_rq(p));3104306530663066+ if (task_dead_and_done(p))30673067+ return;30683068+31053069 p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight));31063070 if (SCX_HAS_OP(sch, set_weight))31073071 SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_weight, rq,···31193077{31203078 struct scx_sched *sch = scx_root;3121307930803080+ if (task_dead_and_done(p))30813081+ return;30823082+31223083 scx_enable_task(p);3123308431243085 /*···3135309031363091static void switched_from_scx(struct rq *rq, struct task_struct *p)31373092{30933093+ if (task_dead_and_done(p))30943094+ return;30953095+31383096 scx_disable_task(p);31393097}31403098
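The guard is intended to sit at the top of every sched_class callback that can fire on a dying task; schematically (some_scx_op is a hypothetical name, the pattern mirrors the call sites added above):

static void some_scx_op(struct rq *rq, struct task_struct *p)
{
        lockdep_assert_rq_held(rq);

        /* %TASK_DEAD and off CPU: @p will never run again, nothing to do */
        if (task_dead_and_done(p))
                return;

        /* ... operate on the live task ... */
}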
+35-9
kernel/sched/sched.h
···38163816 __this_cpu_write(mm->mm_cid.pcpu->cid, cid);38173817}3818381838193819-static __always_inline void mm_cid_from_cpu(struct task_struct *t, unsigned int cpu_cid)38193819+static __always_inline void mm_cid_from_cpu(struct task_struct *t, unsigned int cpu_cid,38203820+ unsigned int mode)38203821{38213822 unsigned int max_cids, tcid = t->mm_cid.cid;38223823 struct mm_struct *mm = t->mm;···38423841 /* Still nothing, allocate a new one */38433842 if (!cid_on_cpu(cpu_cid))38443843 cpu_cid = cid_to_cpu_cid(mm_get_cid(mm));38443844+38453845+ /* Handle the transition mode flag if required */38463846+ if (mode & MM_CID_TRANSIT)38473847+ cpu_cid = cpu_cid_to_cid(cpu_cid) | MM_CID_TRANSIT;38453848 }38463849 mm_cid_update_pcpu_cid(mm, cpu_cid);38473850 mm_cid_update_task_cid(t, cpu_cid);38483851}3849385238503850-static __always_inline void mm_cid_from_task(struct task_struct *t, unsigned int cpu_cid)38533853+static __always_inline void mm_cid_from_task(struct task_struct *t, unsigned int cpu_cid,38543854+ unsigned int mode)38513855{38523856 unsigned int max_cids, tcid = t->mm_cid.cid;38533857 struct mm_struct *mm = t->mm;···38783872 if (!cid_on_task(tcid))38793873 tcid = mm_get_cid(mm);38803874 /* Set the transition mode flag if required */38813881- tcid |= READ_ONCE(mm->mm_cid.transit);38753875+ tcid |= mode & MM_CID_TRANSIT;38823876 }38833877 mm_cid_update_pcpu_cid(mm, tcid);38843878 mm_cid_update_task_cid(t, tcid);···38873881static __always_inline void mm_cid_schedin(struct task_struct *next)38883882{38893883 struct mm_struct *mm = next->mm;38903890- unsigned int cpu_cid;38843884+ unsigned int cpu_cid, mode;3891388538923886 if (!next->mm_cid.active)38933887 return;3894388838953889 cpu_cid = __this_cpu_read(mm->mm_cid.pcpu->cid);38963896- if (likely(!READ_ONCE(mm->mm_cid.percpu)))38973897- mm_cid_from_task(next, cpu_cid);38903890+ mode = READ_ONCE(mm->mm_cid.mode);38913891+ if (likely(!cid_on_cpu(mode)))38923892+ mm_cid_from_task(next, cpu_cid, mode);38983893 else38993899- mm_cid_from_cpu(next, cpu_cid);38943894+ mm_cid_from_cpu(next, cpu_cid, mode);39003895}3901389639023897static __always_inline void mm_cid_schedout(struct task_struct *prev)39033898{38993899+ struct mm_struct *mm = prev->mm;39003900+ unsigned int mode, cid;39013901+39043902 /* During mode transitions CIDs are temporary and need to be dropped */39053903 if (likely(!cid_in_transit(prev->mm_cid.cid)))39063904 return;3907390539083908- mm_drop_cid(prev->mm, cid_from_transit_cid(prev->mm_cid.cid));39093909- prev->mm_cid.cid = MM_CID_UNSET;39063906+ mode = READ_ONCE(mm->mm_cid.mode);39073907+ cid = cid_from_transit_cid(prev->mm_cid.cid);39083908+39093909+ /*39103910+ * If transition mode is done, transfer ownership when the CID is39113911+ * within the convergence range to optimize the next schedule in.39123912+ */39133913+ if (!cid_in_transit(mode) && cid < READ_ONCE(mm->mm_cid.max_cids)) {39143914+ if (cid_on_cpu(mode))39153915+ cid = cid_to_cpu_cid(cid);39163916+39173917+ /* Update both so that the next schedule in goes into the fast path */39183918+ mm_cid_update_pcpu_cid(mm, cid);39193919+ prev->mm_cid.cid = cid;39203920+ } else {39213921+ mm_drop_cid(mm, cid);39223922+ prev->mm_cid.cid = MM_CID_UNSET;39233923+ }39103924}3911392539123926static inline void mm_cid_switch_to(struct task_struct *prev, struct task_struct *next)
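The reworked schedule-out path upgrades a transit CID into a permanent one when the transition has completed and the value already lies inside the convergence range. As an illustrative example (numbers made up): with max_cids == 8, a task scheduling out with transit CID 5 after a finished switch to per CPU mode keeps the value, converts it with cid_to_cpu_cid() and stores it in the per CPU slot, so the next schedule-in on that CPU hits the fast path; a transit CID of 11 is instead dropped back into the pool so the CID space can converge below max_cids.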
···279279/* enough for Elf64_Ehdr, Elf64_Phdr, and all the smaller requests */280280#define MAX_FREADER_BUF_SZ 64281281282282-static int __build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,282282+static int __build_id_parse(struct file *file, unsigned char *build_id,283283 __u32 *size, bool may_fault)284284{285285 const Elf32_Ehdr *ehdr;···287287 char buf[MAX_FREADER_BUF_SZ];288288 int ret;289289290290- /* only works for page backed storage */291291- if (!vma->vm_file)292292- return -EINVAL;293293-294294- freader_init_from_file(&r, buf, sizeof(buf), vma->vm_file, may_fault);290290+ freader_init_from_file(&r, buf, sizeof(buf), file, may_fault);295291296292 /* fetch first 18 bytes of ELF header for checks */297293 ehdr = freader_fetch(&r, 0, offsetofend(Elf32_Ehdr, e_type));···315319 return ret;316320}317321318318-/*319319- * Parse build ID of ELF file mapped to vma322322+/**323323+ * build_id_parse_nofault() - Parse build ID of ELF file mapped to vma320324 * @vma: vma object321325 * @build_id: buffer to store build id, at least BUILD_ID_SIZE long322326 * @size: returns actual build id size in case of success···328332 */329333int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size)330334{331331- return __build_id_parse(vma, build_id, size, false /* !may_fault */);335335+ if (!vma->vm_file)336336+ return -EINVAL;337337+338338+ return __build_id_parse(vma->vm_file, build_id, size, false /* !may_fault */);332339}333340334334-/*335335- * Parse build ID of ELF file mapped to VMA341341+/**342342+ * build_id_parse() - Parse build ID of ELF file mapped to VMA336343 * @vma: vma object337344 * @build_id: buffer to store build id, at least BUILD_ID_SIZE long338345 * @size: returns actual build id size in case of success···347348 */348349int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size)349350{350350- return __build_id_parse(vma, build_id, size, true /* may_fault */);351351+ if (!vma->vm_file)352352+ return -EINVAL;353353+354354+ return __build_id_parse(vma->vm_file, build_id, size, true /* may_fault */);355355+}356356+357357+/**358358+ * build_id_parse_file() - Parse build ID of ELF file359359+ * @file: file object360360+ * @build_id: buffer to store build id, at least BUILD_ID_SIZE long361361+ * @size: returns actual build id size in case of success362362+ *363363+ * Assumes faultable context and can cause page faults to bring in file data364364+ * into page cache.365365+ *366366+ * Return: 0 on success; negative error, otherwise367367+ */368368+int build_id_parse_file(struct file *file, unsigned char *build_id, __u32 *size)369369+{370370+ return __build_id_parse(file, build_id, size, true /* may_fault */);351371}352372353373/**
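A hypothetical caller of the new file-based variant from a context that is allowed to fault; BUILD_ID_SIZE_MAX is assumed to be the existing maximum from linux/buildid.h:

        unsigned char build_id[BUILD_ID_SIZE_MAX];
        __u32 sz;
        int err;

        err = build_id_parse_file(file, build_id, &sz); /* may fault in data */
        if (!err)
                pr_debug("build id: %*phN\n", (int)sz, build_id);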
+20-22
mm/memory-failure.c
···24112411 * In fact it's dangerous to directly bump up page count from 0,24122412 * that may make page_ref_freeze()/page_ref_unfreeze() mismatch.24132413 */24142414- if (!(flags & MF_COUNT_INCREASED)) {24152415- res = get_hwpoison_page(p, flags);24162416- if (!res) {24172417- if (is_free_buddy_page(p)) {24182418- if (take_page_off_buddy(p)) {24192419- page_ref_inc(p);24202420- res = MF_RECOVERED;24212421- } else {24222422- /* We lost the race, try again */24232423- if (retry) {24242424- ClearPageHWPoison(p);24252425- retry = false;24262426- goto try_again;24272427- }24282428- res = MF_FAILED;24292429- }24302430- res = action_result(pfn, MF_MSG_BUDDY, res);24142414+ res = get_hwpoison_page(p, flags);24152415+ if (!res) {24162416+ if (is_free_buddy_page(p)) {24172417+ if (take_page_off_buddy(p)) {24182418+ page_ref_inc(p);24192419+ res = MF_RECOVERED;24312420 } else {24322432- res = action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);24212421+ /* We lost the race, try again */24222422+ if (retry) {24232423+ ClearPageHWPoison(p);24242424+ retry = false;24252425+ goto try_again;24262426+ }24272427+ res = MF_FAILED;24332428 }24342434- goto unlock_mutex;24352435- } else if (res < 0) {24362436- res = action_result(pfn, MF_MSG_GET_HWPOISON, MF_IGNORED);24372437- goto unlock_mutex;24292429+ res = action_result(pfn, MF_MSG_BUDDY, res);24302430+ } else {24312431+ res = action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);24382432 }24332433+ goto unlock_mutex;24342434+ } else if (res < 0) {24352435+ res = action_result(pfn, MF_MSG_GET_HWPOISON, MF_IGNORED);24362436+ goto unlock_mutex;24392437 }2440243824412439 folio = page_folio(p);
+14-9
mm/shmem.c
···12111211 swaps_freed = shmem_free_swap(mapping, indices[i],12121212 end - 1, folio);12131213 if (!swaps_freed) {12141214- /*12151215- * If found a large swap entry cross the end border,12161216- * skip it as the truncate_inode_partial_folio above12171217- * should have at least zerod its content once.12181218- */12141214+ pgoff_t base = indices[i];12151215+12191216 order = shmem_confirm_swap(mapping, indices[i],12201217 radix_to_swp_entry(folio));12211221- if (order > 0 && indices[i] + (1 << order) > end)12221222- continue;12231223- /* Swap was replaced by page: retry */12241224- index = indices[i];12181218+ /*12191219+ * If a large swap entry crosses the end or start12201220+ * border, skip it, as the truncate_inode_partial_folio12211221+ * above should have zeroed its content at least once.12221222+ */12231223+ if (order > 0) {12241224+ base = round_down(base, 1 << order);12251225+ if (base < start || base + (1 << order) > end)12261226+ continue;12271227+ }12281228+ /* Swap was replaced by page or extended, retry */12291229+ index = base;12251230 break;12261231 }12271232 nr_swaps_freed += swaps_freed;
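The skip test now aligns the index down to the entry's natural boundary before comparing it against the truncation range, so entries straddling either border are handled the same way. A runnable toy version of the arithmetic (the numbers are made up):

#include <stdio.h>

#define round_down(x, y) ((x) & ~((y) - 1))    /* y must be a power of two */

int main(void)
{
        unsigned long start = 18, end = 32, index = 21;
        unsigned int order = 4;                 /* a 16-page swap entry */
        unsigned long base = round_down(index, 1UL << order);  /* 16 */

        /*
         * base < start: the entry crosses the start border, so it is
         * skipped; truncate_inode_partial_folio() already zeroed the
         * partial range.
         */
        printf("base=%lu crosses=%d\n", base,
               base < start || base + (1UL << order) > end);
        return 0;
}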
···265265 goto out;266266 }267267268268+ /* NICs can feed encapsulated packets into GRO */269269+ skb->encapsulation = 0;268270 rcu_read_lock();269271 list_for_each_entry_rcu(ptype, head, list) {270272 if (ptype->type != type || !ptype->callbacks.gro_complete)
+15-5
net/core/link_watch.c
···185185186186 netif_state_change(dev);187187 }188188- /* Note: our callers are responsible for calling netdev_tracker_free().189189- * This is the reason we use __dev_put() instead of dev_put().190190- */191191- __dev_put(dev);192188}193189194190static void __linkwatch_run_queue(int urgent_only)···239243 netdev_lock_ops(dev);240244 linkwatch_do_dev(dev);241245 netdev_unlock_ops(dev);246246+ /* Use __dev_put() because netdev_tracker_free() was already247247+ * called above. Must be after netdev_unlock_ops() to prevent248248+ * netdev_run_todo() from freeing the device while still in use.249249+ */250250+ __dev_put(dev);242251 do_dev--;243252 spin_lock_irq(&lweventlist_lock);244253 }···279278{280279 netdev_ops_assert_locked(dev);281280282282- if (linkwatch_clean_dev(dev))281281+ if (linkwatch_clean_dev(dev)) {283282 linkwatch_do_dev(dev);283283+ /* Use __dev_put() because netdev_tracker_free() was already284284+ * called inside linkwatch_clean_dev().285285+ */286286+ __dev_put(dev);287287+ }284288}285289286290void linkwatch_sync_dev(struct net_device *dev)···294288 netdev_lock_ops(dev);295289 linkwatch_do_dev(dev);296290 netdev_unlock_ops(dev);291291+ /* Use __dev_put() because netdev_tracker_free() was already292292+ * called inside linkwatch_clean_dev().293293+ */294294+ __dev_put(dev);297295 }298296}299297
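All three call sites follow the same shape: the reference that paired with the tracker is now dropped by the caller, and only after the ops-locked section ends. Schematically (this mirrors the diff rather than introducing new API):

        netdev_lock_ops(dev);
        linkwatch_do_dev(dev);  /* no longer drops the reference itself */
        netdev_unlock_ops(dev);
        __dev_put(dev);         /* last put outside the lock, so a concurrent
                                 * netdev_run_todo() cannot free @dev while
                                 * the ops lock is still held */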
+34-16
net/core/net-procfs.c
···170170 .show = softnet_seq_show,171171};172172173173+struct ptype_iter_state {174174+ struct seq_net_private p;175175+ struct net_device *dev;176176+};177177+173178static void *ptype_get_idx(struct seq_file *seq, loff_t pos)174179{180180+ struct ptype_iter_state *iter = seq->private;175181 struct list_head *ptype_list = NULL;176182 struct packet_type *pt = NULL;177183 struct net_device *dev;···187181 for_each_netdev_rcu(seq_file_net(seq), dev) {188182 ptype_list = &dev->ptype_all;189183 list_for_each_entry_rcu(pt, ptype_list, list) {190190- if (i == pos)184184+ if (i == pos) {185185+ iter->dev = dev;191186 return pt;187187+ }192188 ++i;193189 }194190 }191191+192192+ iter->dev = NULL;195193196194 list_for_each_entry_rcu(pt, &seq_file_net(seq)->ptype_all, list) {197195 if (i == pos)···228218229219static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)230220{221221+ struct ptype_iter_state *iter = seq->private;231222 struct net *net = seq_file_net(seq);232223 struct net_device *dev;233224 struct packet_type *pt;···240229 return ptype_get_idx(seq, 0);241230242231 pt = v;243243- nxt = pt->list.next;244244- if (pt->dev) {245245- if (nxt != &pt->dev->ptype_all)232232+ nxt = READ_ONCE(pt->list.next);233233+ dev = iter->dev;234234+ if (dev) {235235+ if (nxt != &dev->ptype_all)246236 goto found;247237248248- dev = pt->dev;249238 for_each_netdev_continue_rcu(seq_file_net(seq), dev) {250250- if (!list_empty(&dev->ptype_all)) {251251- nxt = dev->ptype_all.next;239239+ nxt = READ_ONCE(dev->ptype_all.next);240240+ if (nxt != &dev->ptype_all) {241241+ iter->dev = dev;252242 goto found;253243 }254244 }255255- nxt = net->ptype_all.next;245245+ iter->dev = NULL;246246+ nxt = READ_ONCE(net->ptype_all.next);256247 goto net_ptype_all;257248 }258249···265252266253 if (nxt == &net->ptype_all) {267254 /* continue with ->ptype_specific if it's not empty */268268- nxt = net->ptype_specific.next;255255+ nxt = READ_ONCE(net->ptype_specific.next);269256 if (nxt != &net->ptype_specific)270257 goto found;271258 }272259273260 hash = 0;274274- nxt = ptype_base[0].next;261261+ nxt = READ_ONCE(ptype_base[0].next);275262 } else276263 hash = ntohs(pt->type) & PTYPE_HASH_MASK;277264278265 while (nxt == &ptype_base[hash]) {279266 if (++hash >= PTYPE_HASH_SIZE)280267 return NULL;281281- nxt = ptype_base[hash].next;268268+ nxt = READ_ONCE(ptype_base[hash].next);282269 }283270found:284271 return list_entry(nxt, struct packet_type, list);···292279293280static int ptype_seq_show(struct seq_file *seq, void *v)294281{282282+ struct ptype_iter_state *iter = seq->private;295283 struct packet_type *pt = v;284284+ struct net_device *dev;296285297297- if (v == SEQ_START_TOKEN)286286+ if (v == SEQ_START_TOKEN) {298287 seq_puts(seq, "Type Device Function\n");299299- else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) &&300300- (!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) {288288+ return 0;289289+ }290290+ dev = iter->dev;291291+ if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) &&292292+ (!dev || net_eq(dev_net(dev), seq_file_net(seq)))) {301293 if (pt->type == htons(ETH_P_ALL))302294 seq_puts(seq, "ALL ");303295 else304296 seq_printf(seq, "%04x", ntohs(pt->type));305297306298 seq_printf(seq, " %-8s %ps\n",307307- pt->dev ? pt->dev->name : "", pt->func);299299+ dev ? 
dev->name : "", pt->func);308300 }309301310302 return 0;···333315 &softnet_seq_ops))334316 goto out_dev;335317 if (!proc_create_net("ptype", 0444, net->proc_net, &ptype_seq_ops,336336- sizeof(struct seq_net_private)))318318+ sizeof(struct ptype_iter_state)))337319 goto out_softnet;338320339321 if (wext_proc_init(net))
···55#include <linux/sysctl.h>66#include <linux/minmax.h>7788-#include "lsm.h"99-108/* amount of vm to protect from userspace access by both DAC and the LSM*/119unsigned long mmap_min_addr;1210/* amount of vm to protect from userspace using CAP_SYS_RAWIO (DAC) */···5254 },5355};54565555-int __init min_addr_init(void)5757+static int __init mmap_min_addr_init(void)5658{5759 register_sysctl_init("vm", min_addr_sysctl_table);5860 update_mmap_min_addr();59616062 return 0;6163}6464+pure_initcall(mmap_min_addr_init);
···13101310 int i;1311131113121312 hdev = pdata->hw_pdata;13131313- link_mask = hdev->info.link_mask;1314131313151315- if (!link_mask) {13141314+ if (!hdev->info.link_mask) {13161315 dev_info(sdev->dev, "SoundWire links not enabled\n");13171316 return NULL;13181317 }···13421343 * link_mask supported by hw and then go on searching13431344 * link_adr13441345 */13451345- if (~link_mask & mach->link_mask)13461346+ if (~hdev->info.link_mask & mach->link_mask)13461347 continue;1347134813481349 /* No need to match adr if there is no links defined */
+2-7
sound/usb/mixer_quirks.c
···311311 if (pm.err < 0)312312 return pm.err;313313314314- if (chip->usb_id == USB_ID(0x041e, 0x3042))315315- err = snd_usb_ctl_msg(chip->dev,316316- usb_sndctrlpipe(chip->dev, 0), 0x24,317317- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,318318- !value, 0, NULL, 0);319319- /* USB X-Fi S51 Pro */320320- if (chip->usb_id == USB_ID(0x041e, 0x30df))314314+ if (chip->usb_id == USB_ID(0x041e, 0x3042) || /* USB X-Fi S51 */315315+ chip->usb_id == USB_ID(0x041e, 0x30df)) /* USB X-Fi S51 Pro */321316 err = snd_usb_ctl_msg(chip->dev,322317 usb_sndctrlpipe(chip->dev, 0), 0x24,323318 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
···66#define SHN_LIVEPATCH 0xff207788/*99- * __klp_objects and __klp_funcs are created by klp diff and used by the patch1010- * module init code to build the klp_patch, klp_object and klp_func structs1111- * needed by the livepatch API.99+ * .init.klp_objects and .init.klp_funcs are created by klp diff and used by the1010+ * patch module init code to build the klp_patch, klp_object and klp_func1111+ * structs needed by the livepatch API.1212 */1313-#define KLP_OBJECTS_SEC "__klp_objects"1414-#define KLP_FUNCS_SEC "__klp_funcs"1313+#define KLP_OBJECTS_SEC ".init.klp_objects"1414+#define KLP_FUNCS_SEC ".init.klp_funcs"15151616/*1717 * __klp_relocs is an intermediate section which are created by klp diff and
+35-6
tools/objtool/klp-diff.c
···364364 struct symbol *file1_sym, *file2_sym;365365 struct symbol *sym1, *sym2;366366367367- /* Correlate locals */368368- for (file1_sym = first_file_symbol(e->orig),369369- file2_sym = first_file_symbol(e->patched); ;370370- file1_sym = next_file_symbol(e->orig, file1_sym),371371- file2_sym = next_file_symbol(e->patched, file2_sym)) {367367+ file1_sym = first_file_symbol(e->orig);368368+ file2_sym = first_file_symbol(e->patched);369369+370370+ /*371371+ * Correlate any locals before the first FILE symbol. This has been372372+ * seen when LTO inexplicably strips the initramfs_data.o FILE symbol373373+ * due to the file only containing data and no code.374374+ */375375+ for_each_sym(e->orig, sym1) {376376+ if (sym1 == file1_sym || !is_local_sym(sym1))377377+ break;378378+379379+ if (dont_correlate(sym1))380380+ continue;381381+382382+ for_each_sym(e->patched, sym2) {383383+ if (sym2 == file2_sym || !is_local_sym(sym2))384384+ break;385385+386386+ if (sym2->twin || dont_correlate(sym2))387387+ continue;388388+389389+ if (strcmp(sym1->demangled_name, sym2->demangled_name))390390+ continue;391391+392392+ sym1->twin = sym2;393393+ sym2->twin = sym1;394394+ break;395395+ }396396+ }397397+398398+ /* Correlate locals after the first FILE symbol */399399+ for (; ; file1_sym = next_file_symbol(e->orig, file1_sym),400400+ file2_sym = next_file_symbol(e->patched, file2_sym)) {372401373402 if (!file1_sym && file2_sym) {374403 ERROR("FILE symbol mismatch: NULL != %s", file2_sym->name);···14651436}1466143714671438/*14681468- * Create __klp_objects and __klp_funcs sections which are intermediate14391439+ * Create .init.klp_objects and .init.klp_funcs sections which are intermediate14691440 * sections provided as input to the patch module's init code for building the14701441 * klp_patch, klp_object and klp_func structs for the livepatch API.14711442 */
···162162 echo " ok"163163}164164165165+run_test_csum() {166166+ local -r msg="$1"167167+ local -r dst="$2"168168+ local csum_error_filter=UdpInCsumErrors169169+ local csum_errors170170+171171+ printf "%-40s" "$msg"172172+173173+ is_ipv6 "$dst" && csum_error_filter=Udp6InCsumErrors174174+175175+ ip netns exec "$NS_DST" iperf3 -s -1 >/dev/null &176176+ wait_local_port_listen "$NS_DST" 5201 tcp177177+ local spid="$!"178178+ ip netns exec "$NS_SRC" iperf3 -c "$dst" -t 2 >/dev/null179179+ local retc="$?"180180+ wait "$spid"181181+ local rets="$?"182182+ if [ "$rets" -ne 0 ] || [ "$retc" -ne 0 ]; then183183+ echo " fail client exit code $retc, server $rets"184184+ ret=1185185+ return186186+ fi187187+188188+ csum_errors=$(ip netns exec "$NS_DST" nstat -as "$csum_error_filter" |189189+ grep "$csum_error_filter" | awk '{print $2}')190190+ if [ -n "$csum_errors" ] && [ "$csum_errors" -gt 0 ]; then191191+ echo " fail - csum error on receive $csum_errors, expected 0"192192+ ret=1193193+ return194194+ fi195195+ echo " ok"196196+}197197+165198run_bench() {166199 local -r msg=$1167200 local -r dst=$2···292259 # stray traffic on top of the UDP tunnel293260 ip netns exec $NS_SRC $PING -q -c 1 $OL_NET$DST_NAT >/dev/null294261 run_test "GRO fwd over UDP tunnel" $OL_NET$DST_NAT 10 10 $OL_NET$DST262262+ cleanup263263+264264+ # force segmentation and re-aggregation265265+ create_vxlan_pair266266+ ip netns exec "$NS_DST" ethtool -K veth"$DST" generic-receive-offload on267267+ ip netns exec "$NS_SRC" ethtool -K veth"$SRC" tso off268268+ ip -n "$NS_SRC" link set dev veth"$SRC" mtu 1430269269+270270+ # forward to a 2nd veth pair271271+ ip -n "$NS_DST" link add br0 type bridge272272+ ip -n "$NS_DST" link set dev veth"$DST" master br0273273+274274+ # segment the aggregated TSO packet, without csum offload275275+ ip -n "$NS_DST" link add veth_segment type veth peer veth_rx276276+ for FEATURE in tso tx-udp-segmentation tx-checksumming; do277277+ ip netns exec "$NS_DST" ethtool -K veth_segment "$FEATURE" off278278+ done279279+ ip -n "$NS_DST" link set dev veth_segment master br0 up280280+ ip -n "$NS_DST" link set dev br0 up281281+ ip -n "$NS_DST" link set dev veth_rx up282282+283283+ # move the lower layer IP into the last added veth284284+ for ADDR in "$BM_NET_V4$DST/24" "$BM_NET_V6$DST/64"; do285285+ # the nodad argument will let iproute emit a harmless warning286286+ # with ipv4 addresses287287+ ip -n "$NS_DST" addr del dev veth"$DST" "$ADDR"288288+ ip -n "$NS_DST" addr add dev veth_rx "$ADDR" \289289+ nodad 2>/dev/null290290+ done291291+292292+ run_test_csum "GSO after GRO" "$OL_NET$DST"295293 cleanup296294done297295
+24-20
virt/kvm/eventfd.c
···157157}158158159159160160-/* assumes kvm->irqfds.lock is held */161161-static bool162162-irqfd_is_active(struct kvm_kernel_irqfd *irqfd)160160+static bool irqfd_is_active(struct kvm_kernel_irqfd *irqfd)163161{162162+ /*163163+ * Assert that either irqfds.lock or SRCU is held, as irqfds.lock must164164+ * be held to prevent false positives (on the irqfd being active), and165165+ * while false negatives are impossible as irqfds are never added back166166+ * to the list once they're deactivated, the caller must at least hold167167+ * SRCU to guard against routing changes if the irqfd is deactivated.168168+ */169169+ lockdep_assert_once(lockdep_is_held(&irqfd->kvm->irqfds.lock) ||170170+ srcu_read_lock_held(&irqfd->kvm->irq_srcu));171171+164172 return list_empty(&irqfd->list) ? false : true;165173}166174167175/*168176 * Mark the irqfd as inactive and schedule it for removal169169- *170170- * assumes kvm->irqfds.lock is held171177 */172172-static void173173-irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)178178+static void irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)174179{180180+ lockdep_assert_held(&irqfd->kvm->irqfds.lock);181181+175182 BUG_ON(!irqfd_is_active(irqfd));176183177184 list_del_init(&irqfd->list);···224217 seq = read_seqcount_begin(&irqfd->irq_entry_sc);225218 irq = irqfd->irq_entry;226219 } while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));227227- /* An event has been signaled, inject an interrupt */228228- if (kvm_arch_set_irq_inatomic(&irq, kvm,220220+221221+ /*222222+ * An event has been signaled, inject an interrupt unless the223223+ * irqfd is being deassigned (isn't active), in which case the224224+ * routing information may be stale (once the irqfd is removed225225+ * from the list, it will stop receiving routing updates).226226+ */227227+ if (unlikely(!irqfd_is_active(irqfd)) ||228228+ kvm_arch_set_irq_inatomic(&irq, kvm,229229 KVM_USERSPACE_IRQ_SOURCE_ID, 1,230230 false) == -EWOULDBLOCK)231231 schedule_work(&irqfd->inject);···599585 spin_lock_irq(&kvm->irqfds.lock);600586601587 list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {602602- if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {603603- /*604604- * This clearing of irq_entry.type is needed for when605605- * another thread calls kvm_irq_routing_update before606606- * we flush workqueue below (we synchronize with607607- * kvm_irq_routing_update using irqfds.lock).608608- */609609- write_seqcount_begin(&irqfd->irq_entry_sc);610610- irqfd->irq_entry.type = 0;611611- write_seqcount_end(&irqfd->irq_entry_sc);588588+ if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi)612589 irqfd_deactivate(irqfd);613613- }614590 }615591616592 spin_unlock_irq(&kvm->irqfds.lock);
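The relaxed assertion permits lock-free readers along the lines of the sketch below: SRCU alone suffices because a deactivated irqfd is never re-added to the list, so the only race a reader can see is a benign transition from active to inactive (the snippet is illustrative, not from the patch):

        int idx = srcu_read_lock(&kvm->irq_srcu);

        if (irqfd_is_active(irqfd)) {
                /*
                 * Safe under SRCU: routing updates for active irqfds are
                 * published under irq_srcu, and a concurrent deactivation
                 * at worst wastes this lookup.
                 */
        }
        srcu_read_unlock(&kvm->irq_srcu, idx);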