Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'driver-core-6.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/driver-core/driver-core

Pull driver core updates from Danilo Krummrich:
"Auxiliary:
- Drop call to dev_pm_domain_detach() in auxiliary_bus_probe()
- Optimize logic of auxiliary_match_id()

Rust:
- Auxiliary:
- Use primitive C types from prelude

- DebugFs:
- Add debugfs support for simple read/write files and custom
callbacks through a File-type-based and directory-scope-based
API
- Sample driver code for the File-type-based API
- Sample module code for the directory-scope-based API

- I/O:
- Add io::poll module and implement Rust-specific
read_poll_timeout() helper

- IRQ:
- Implement support for threaded and non-threaded device IRQs
based on (&Device<Bound>, IRQ number) tuples (IrqRequest)
- Provide &Device<Bound> cookie in IRQ handlers

- PCI:
- Support IRQ requests from IRQ vectors for a specific
pci::Device<Bound>
- Implement accessors for subsystem IDs, revision, devid and
resource start
- Provide dedicated pci::Vendor and pci::Class types for vendor
and class ID numbers
- Implement Display to print actual vendor and class names; Debug
to print the raw ID numbers
- Add pci::DeviceId::from_class_and_vendor() helper
- Use primitive C types from prelude
- Various minor inline and (safety) comment improvements

- Platform:
- Support IRQ requests from IRQ vectors for a specific
platform::Device<Bound>

- Nova:
- Use pci::DeviceId::from_class_and_vendor() to avoid probing
non-display/compute PCI functions

- Misc:
- Add helper for cpu_relax()
- Update ARef import from sync::aref

sysfs:
- Remove bin_attrs_new field from struct attribute_group
- Remove read_new() and write_new() from struct bin_attribute

Misc:
- Document potential race condition in get_dev_from_fwnode()
- Constify node_group argument in software node registration
functions
- Fix order of kernel-doc parameters in various functions
- Set power.no_pm flag for faux devices
- Set power.no_callbacks flag along with the power.no_pm flag
- Constify the pmu_bus bus type
- Minor spelling fixes"

* tag 'driver-core-6.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/driver-core/driver-core: (43 commits)
rust: pci: display symbolic PCI vendor names
rust: pci: display symbolic PCI class names
rust: pci: fix incorrect platform reference in PCI driver probe doc comment
rust: pci: fix incorrect platform reference in PCI driver unbind doc comment
perf: make pmu_bus const
samples: rust: Add scoped debugfs sample driver
rust: debugfs: Add support for scoped directories
samples: rust: Add debugfs sample driver
rust: debugfs: Add support for callback-based files
rust: debugfs: Add support for writable files
rust: debugfs: Add support for read-only files
rust: debugfs: Add initial support for directories
driver core: auxiliary bus: Optimize logic of auxiliary_match_id()
driver core: auxiliary bus: Drop dev_pm_domain_detach() call
driver core: Fix order of the kernel-doc parameters
driver core: get_dev_from_fwnode(): document potential race
drivers: base: fix "publically"->"publicly"
driver core/PM: Set power.no_callbacks along with power.no_pm
driver core: faux: Set power.no_pm for faux devices
rust: pci: inline several tiny functions
...

+3392 -99
+20
MAINTAINERS
··· 7021 7021 F: include/dt-bindings/pmu/exynos_ppmu.h 7022 7022 F: include/linux/devfreq-event.h 7023 7023 7024 + DEVICE I/O & IRQ [RUST] 7025 + M: Danilo Krummrich <dakr@kernel.org> 7026 + M: Alice Ryhl <aliceryhl@google.com> 7027 + M: Daniel Almeida <daniel.almeida@collabora.com> 7028 + L: rust-for-linux@vger.kernel.org 7029 + S: Supported 7030 + W: https://rust-for-linux.com 7031 + B: https://github.com/Rust-for-Linux/linux/issues 7032 + C: https://rust-for-linux.zulipchat.com 7033 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/driver-core/driver-core.git 7034 + F: rust/kernel/io.rs 7035 + F: rust/kernel/io/ 7036 + F: rust/kernel/irq.rs 7037 + F: rust/kernel/irq/ 7038 + 7024 7039 DEVICE RESOURCE MANAGEMENT HELPERS 7025 7040 M: Hans de Goede <hansg@kernel.org> 7026 7041 R: Matti Vaittinen <mazziesaccount@gmail.com> ··· 7487 7472 F: include/linux/property.h 7488 7473 F: include/linux/sysfs.h 7489 7474 F: lib/kobj* 7475 + F: rust/kernel/debugfs.rs 7476 + F: rust/kernel/debugfs/ 7490 7477 F: rust/kernel/device.rs 7491 7478 F: rust/kernel/device/ 7492 7479 F: rust/kernel/device_id.rs ··· 7496 7479 F: rust/kernel/driver.rs 7497 7480 F: rust/kernel/faux.rs 7498 7481 F: rust/kernel/platform.rs 7482 + F: samples/rust/rust_debugfs.rs 7483 + F: samples/rust/rust_debugfs_scoped.rs 7499 7484 F: samples/rust/rust_driver_platform.rs 7500 7485 F: samples/rust/rust_driver_faux.rs 7501 7486 ··· 19593 19574 T: git git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci.git 19594 19575 F: rust/helpers/pci.c 19595 19576 F: rust/kernel/pci.rs 19577 + F: rust/kernel/pci/ 19596 19578 F: samples/rust/rust_driver_pci.rs 19597 19579 19598 19580 PCIE BANDWIDTH CONTROLLER
+12 -15
drivers/base/auxiliary.c
··· 171 171 static const struct auxiliary_device_id *auxiliary_match_id(const struct auxiliary_device_id *id, 172 172 const struct auxiliary_device *auxdev) 173 173 { 174 + const char *auxdev_name = dev_name(&auxdev->dev); 175 + const char *p = strrchr(auxdev_name, '.'); 176 + int match_size; 177 + 178 + if (!p) 179 + return NULL; 180 + match_size = p - auxdev_name; 181 + 174 182 for (; id->name[0]; id++) { 175 - const char *p = strrchr(dev_name(&auxdev->dev), '.'); 176 - int match_size; 177 - 178 - if (!p) 179 - continue; 180 - match_size = p - dev_name(&auxdev->dev); 181 - 182 183 /* use dev_name(&auxdev->dev) prefix before last '.' char to match to */ 183 184 if (strlen(id->name) == match_size && 184 - !strncmp(dev_name(&auxdev->dev), id->name, match_size)) 185 + !strncmp(auxdev_name, id->name, match_size)) 185 186 return id; 186 187 } 187 188 return NULL; ··· 218 217 struct auxiliary_device *auxdev = to_auxiliary_dev(dev); 219 218 int ret; 220 219 221 - ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON); 220 + ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON | 221 + PD_FLAG_DETACH_POWER_OFF); 222 222 if (ret) { 223 223 dev_warn(dev, "Failed to attach to PM Domain : %d\n", ret); 224 224 return ret; 225 225 } 226 226 227 - ret = auxdrv->probe(auxdev, auxiliary_match_id(auxdrv->id_table, auxdev)); 228 - if (ret) 229 - dev_pm_domain_detach(dev, true); 230 - 231 - return ret; 227 + return auxdrv->probe(auxdev, auxiliary_match_id(auxdrv->id_table, auxdev)); 232 228 } 233 229 234 230 static void auxiliary_bus_remove(struct device *dev) ··· 235 237 236 238 if (auxdrv->remove) 237 239 auxdrv->remove(auxdev); 238 - dev_pm_domain_detach(dev, true); 239 240 } 240 241 241 242 static void auxiliary_bus_shutdown(struct device *dev)
+23 -4
drivers/base/core.c
··· 3994 3994 /** 3995 3995 * device_for_each_child - device child iterator. 3996 3996 * @parent: parent struct device. 3997 - * @fn: function to be called for each device. 3998 3997 * @data: data for the callback. 3998 + * @fn: function to be called for each device. 3999 3999 * 4000 4000 * Iterate over @parent's child devices, and call @fn for each, 4001 4001 * passing it @data. ··· 4024 4024 /** 4025 4025 * device_for_each_child_reverse - device child iterator in reversed order. 4026 4026 * @parent: parent struct device. 4027 - * @fn: function to be called for each device. 4028 4027 * @data: data for the callback. 4028 + * @fn: function to be called for each device. 4029 4029 * 4030 4030 * Iterate over @parent's child devices, and call @fn for each, 4031 4031 * passing it @data. ··· 4055 4055 * device_for_each_child_reverse_from - device child iterator in reversed order. 4056 4056 * @parent: parent struct device. 4057 4057 * @from: optional starting point in child list 4058 - * @fn: function to be called for each device. 4059 4058 * @data: data for the callback. 4059 + * @fn: function to be called for each device. 4060 4060 * 4061 4061 * Iterate over @parent's child devices, starting at @from, and call @fn 4062 4062 * for each, passing it @data. This helper is identical to ··· 4089 4089 /** 4090 4090 * device_find_child - device iterator for locating a particular device. 4091 4091 * @parent: parent struct device 4092 - * @match: Callback function to check device 4093 4092 * @data: Data to pass to match function 4093 + * @match: Callback function to check device 4094 4094 * 4095 4095 * This is similar to the device_for_each_child() function above, but it 4096 4096 * returns a reference to a device that is 'found' for later use, as ··· 5278 5278 } 5279 5279 EXPORT_SYMBOL_GPL(device_set_node); 5280 5280 5281 + /** 5282 + * get_dev_from_fwnode - Obtain a reference count of the struct device the 5283 + * struct fwnode_handle is associated with. 
5284 + * @fwnode: The pointer to the struct fwnode_handle to obtain the struct device 5285 + * reference count of. 5286 + * 5287 + * This function obtains a reference count of the device the device pointer 5288 + * embedded in the struct fwnode_handle points to. 5289 + * 5290 + * Note that the struct device pointer embedded in struct fwnode_handle does 5291 + * *not* have a reference count of the struct device itself. 5292 + * 5293 + * Hence, it is a UAF (and thus a bug) to call this function if the caller can't 5294 + * guarantee that the last reference count of the corresponding struct device is 5295 + * not dropped concurrently. 5296 + * 5297 + * This is possible since struct fwnode_handle has its own reference count and 5298 + * hence can out-live the struct device it is associated with. 5299 + */ 5281 5300 struct device *get_dev_from_fwnode(struct fwnode_handle *fwnode) 5282 5301 { 5283 5302 return get_device((fwnode)->dev);
+1 -1
drivers/base/cpu.c
··· 325 325 * This is an empty function to prevent the driver core from spitting a 326 326 * warning at us. Yes, I know this is directly opposite of what the 327 327 * documentation for the driver core and kobjects say, and the author 328 - * of this code has already been publically ridiculed for doing 328 + * of this code has already been publicly ridiculed for doing 329 329 * something as foolish as this. However, at this point in time, it is 330 330 * the only way to handle the issue of statically allocated cpu 331 331 * devices. The different architectures will have their cpu device
+1
drivers/base/faux.c
··· 155 155 dev->parent = &faux_bus_root; 156 156 dev->bus = &faux_bus_type; 157 157 dev_set_name(dev, "%s", name); 158 + device_set_pm_not_required(dev); 158 159 159 160 ret = device_add(dev); 160 161 if (ret) {
+2 -3
drivers/base/swnode.c
··· 844 844 * of this function or by ordering the array such that parent comes before 845 845 * child. 846 846 */ 847 - int software_node_register_node_group(const struct software_node **node_group) 847 + int software_node_register_node_group(const struct software_node * const *node_group) 848 848 { 849 849 unsigned int i; 850 850 int ret; ··· 877 877 * remove the nodes individually, in the correct order (child before 878 878 * parent). 879 879 */ 880 - void software_node_unregister_node_group( 881 - const struct software_node **node_group) 880 + void software_node_unregister_node_group(const struct software_node * const *node_group) 882 881 { 883 882 unsigned int i = 0; 884 883
+28 -5
drivers/gpu/nova-core/driver.rs
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 3 - use kernel::{auxiliary, bindings, c_str, device::Core, pci, prelude::*, sizes::SZ_16M, sync::Arc}; 3 + use kernel::{ 4 + auxiliary, c_str, 5 + device::Core, 6 + pci, 7 + pci::{Class, ClassMask, Vendor}, 8 + prelude::*, 9 + sizes::SZ_16M, 10 + sync::Arc, 11 + }; 4 12 5 13 use crate::gpu::Gpu; 6 14 ··· 26 18 PCI_TABLE, 27 19 MODULE_PCI_TABLE, 28 20 <NovaCore as pci::Driver>::IdInfo, 29 - [( 30 - pci::DeviceId::from_id(bindings::PCI_VENDOR_ID_NVIDIA, bindings::PCI_ANY_ID as u32), 31 - () 32 - )] 21 + [ 22 + // Modern NVIDIA GPUs will show up as either VGA or 3D controllers. 23 + ( 24 + pci::DeviceId::from_class_and_vendor( 25 + Class::DISPLAY_VGA, 26 + ClassMask::ClassSubclass, 27 + Vendor::NVIDIA 28 + ), 29 + () 30 + ), 31 + ( 32 + pci::DeviceId::from_class_and_vendor( 33 + Class::DISPLAY_3D, 34 + ClassMask::ClassSubclass, 35 + Vendor::NVIDIA 36 + ), 37 + () 38 + ), 39 + ] 33 40 ); 34 41 35 42 impl pci::Driver for NovaCore {
+5 -17
fs/sysfs/file.c
··· 97 97 count = size - pos; 98 98 } 99 99 100 - if (!battr->read && !battr->read_new) 100 + if (!battr->read) 101 101 return -EIO; 102 - 103 - if (battr->read_new) 104 - return battr->read_new(of->file, kobj, battr, buf, pos, count); 105 102 106 103 return battr->read(of->file, kobj, battr, buf, pos, count); 107 104 } ··· 158 161 if (!count) 159 162 return 0; 160 163 161 - if (!battr->write && !battr->write_new) 164 + if (!battr->write) 162 165 return -EIO; 163 - 164 - if (battr->write_new) 165 - return battr->write_new(of->file, kobj, battr, buf, pos, count); 166 166 167 167 return battr->write(of->file, kobj, battr, buf, pos, count); 168 168 } ··· 329 335 const struct kernfs_ops *ops; 330 336 struct kernfs_node *kn; 331 337 332 - if (battr->read && battr->read_new) 333 - return -EINVAL; 334 - 335 - if (battr->write && battr->write_new) 336 - return -EINVAL; 337 - 338 338 if (battr->mmap) 339 339 ops = &sysfs_bin_kfops_mmap; 340 - else if ((battr->read || battr->read_new) && (battr->write || battr->write_new)) 340 + else if (battr->read && battr->write) 341 341 ops = &sysfs_bin_kfops_rw; 342 - else if (battr->read || battr->read_new) 342 + else if (battr->read) 343 343 ops = &sysfs_bin_kfops_ro; 344 - else if (battr->write || battr->write_new) 344 + else if (battr->write) 345 345 ops = &sysfs_bin_kfops_wo; 346 346 else 347 347 ops = &sysfs_file_kfops_empty;
+3
include/linux/device.h
··· 851 851 static inline void device_set_pm_not_required(struct device *dev) 852 852 { 853 853 dev->power.no_pm = true; 854 + #ifdef CONFIG_PM 855 + dev->power.no_callbacks = true; 856 + #endif 854 857 } 855 858 856 859 static inline void dev_pm_syscore_device(struct device *dev, bool val)
+2 -2
include/linux/property.h
··· 574 574 software_node_find_by_name(const struct software_node *parent, 575 575 const char *name); 576 576 577 - int software_node_register_node_group(const struct software_node **node_group); 578 - void software_node_unregister_node_group(const struct software_node **node_group); 577 + int software_node_register_node_group(const struct software_node * const *node_group); 578 + void software_node_unregister_node_group(const struct software_node * const *node_group); 579 579 580 580 int software_node_register(const struct software_node *node); 581 581 void software_node_unregister(const struct software_node *node);
+2 -9
include/linux/sysfs.h
··· 106 106 const struct bin_attribute *, 107 107 int); 108 108 struct attribute **attrs; 109 - union { 110 - const struct bin_attribute *const *bin_attrs; 111 - const struct bin_attribute *const *bin_attrs_new; 112 - }; 109 + const struct bin_attribute *const *bin_attrs; 113 110 }; 114 111 115 112 #define SYSFS_PREALLOC 010000 ··· 290 293 291 294 #define BIN_ATTRIBUTE_GROUPS(_name) \ 292 295 static const struct attribute_group _name##_group = { \ 293 - .bin_attrs_new = _name##_attrs, \ 296 + .bin_attrs = _name##_attrs, \ 294 297 }; \ 295 298 __ATTRIBUTE_GROUPS(_name) 296 299 ··· 305 308 struct address_space *(*f_mapping)(void); 306 309 ssize_t (*read)(struct file *, struct kobject *, const struct bin_attribute *, 307 310 char *, loff_t, size_t); 308 - ssize_t (*read_new)(struct file *, struct kobject *, const struct bin_attribute *, 309 - char *, loff_t, size_t); 310 311 ssize_t (*write)(struct file *, struct kobject *, const struct bin_attribute *, 311 312 char *, loff_t, size_t); 312 - ssize_t (*write_new)(struct file *, struct kobject *, 313 - const struct bin_attribute *, char *, loff_t, size_t); 314 313 loff_t (*llseek)(struct file *, struct kobject *, const struct bin_attribute *, 315 314 loff_t, int); 316 315 int (*mmap)(struct file *, struct kobject *, const struct bin_attribute *attr,
+1 -1
kernel/events/core.c
··· 12234 12234 }; 12235 12235 12236 12236 static int pmu_bus_running; 12237 - static struct bus_type pmu_bus = { 12237 + static const struct bus_type pmu_bus = { 12238 12238 .name = "event_source", 12239 12239 .dev_groups = pmu_dev_groups, 12240 12240 };
+2
rust/bindings/bindings_helper.h
··· 46 46 #include <linux/cpufreq.h> 47 47 #include <linux/cpumask.h> 48 48 #include <linux/cred.h> 49 + #include <linux/debugfs.h> 49 50 #include <linux/device/faux.h> 50 51 #include <linux/dma-mapping.h> 51 52 #include <linux/errname.h> 52 53 #include <linux/ethtool.h> 53 54 #include <linux/file.h> 54 55 #include <linux/firmware.h> 56 + #include <linux/interrupt.h> 55 57 #include <linux/fs.h> 56 58 #include <linux/ioport.h> 57 59 #include <linux/jiffies.h>
+2
rust/helpers/helpers.c
··· 24 24 #include "dma.c" 25 25 #include "drm.c" 26 26 #include "err.c" 27 + #include "irq.c" 27 28 #include "fs.c" 28 29 #include "io.c" 29 30 #include "jump_label.c" ··· 37 36 #include "pid_namespace.c" 38 37 #include "platform.c" 39 38 #include "poll.c" 39 + #include "processor.c" 40 40 #include "property.c" 41 41 #include "rbtree.c" 42 42 #include "rcu.c"
+9
rust/helpers/irq.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include <linux/interrupt.h> 4 + 5 + int rust_helper_request_irq(unsigned int irq, irq_handler_t handler, 6 + unsigned long flags, const char *name, void *dev) 7 + { 8 + return request_irq(irq, handler, flags, name, dev); 9 + }
+18
rust/helpers/pci.c
··· 2 2 3 3 #include <linux/pci.h> 4 4 5 + u16 rust_helper_pci_dev_id(struct pci_dev *dev) 6 + { 7 + return PCI_DEVID(dev->bus->number, dev->devfn); 8 + } 9 + 10 + resource_size_t rust_helper_pci_resource_start(struct pci_dev *pdev, int bar) 11 + { 12 + return pci_resource_start(pdev, bar); 13 + } 14 + 5 15 resource_size_t rust_helper_pci_resource_len(struct pci_dev *pdev, int bar) 6 16 { 7 17 return pci_resource_len(pdev, bar); ··· 21 11 { 22 12 return dev_is_pci(dev); 23 13 } 14 + 15 + #ifndef CONFIG_PCI_MSI 16 + int rust_helper_pci_irq_vector(struct pci_dev *pdev, unsigned int nvec) 17 + { 18 + return pci_irq_vector(pdev, nvec); 19 + } 20 + 21 + #endif
+8
rust/helpers/processor.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include <linux/processor.h> 4 + 5 + void rust_helper_cpu_relax(void) 6 + { 7 + cpu_relax(); 8 + }
+2 -2
rust/kernel/auxiliary.rs
··· 55 55 extern "C" fn probe_callback( 56 56 adev: *mut bindings::auxiliary_device, 57 57 id: *const bindings::auxiliary_device_id, 58 - ) -> kernel::ffi::c_int { 58 + ) -> c_int { 59 59 // SAFETY: The auxiliary bus only ever calls the probe callback with a valid pointer to a 60 60 // `struct auxiliary_device`. 61 61 // ··· 245 245 kernel::impl_device_context_into_aref!(Device); 246 246 247 247 // SAFETY: Instances of `Device` are always reference-counted. 248 - unsafe impl crate::types::AlwaysRefCounted for Device { 248 + unsafe impl crate::sync::aref::AlwaysRefCounted for Device { 249 249 fn inc_ref(&self) { 250 250 // SAFETY: The existence of a shared reference guarantees that the refcount is non-zero. 251 251 unsafe { bindings::get_device(self.as_ref().as_raw()) };
+594
rust/kernel/debugfs.rs
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + // Copyright (C) 2025 Google LLC. 3 + 4 + //! DebugFS Abstraction 5 + //! 6 + //! C header: [`include/linux/debugfs.h`](srctree/include/linux/debugfs.h) 7 + 8 + // When DebugFS is disabled, many parameters are dead. Linting for this isn't helpful. 9 + #![cfg_attr(not(CONFIG_DEBUG_FS), allow(unused_variables))] 10 + 11 + use crate::prelude::*; 12 + use crate::str::CStr; 13 + #[cfg(CONFIG_DEBUG_FS)] 14 + use crate::sync::Arc; 15 + use crate::uaccess::UserSliceReader; 16 + use core::fmt; 17 + use core::marker::PhantomData; 18 + use core::marker::PhantomPinned; 19 + #[cfg(CONFIG_DEBUG_FS)] 20 + use core::mem::ManuallyDrop; 21 + use core::ops::Deref; 22 + 23 + mod traits; 24 + pub use traits::{Reader, Writer}; 25 + 26 + mod callback_adapters; 27 + use callback_adapters::{FormatAdapter, NoWriter, WritableAdapter}; 28 + mod file_ops; 29 + use file_ops::{FileOps, ReadFile, ReadWriteFile, WriteFile}; 30 + #[cfg(CONFIG_DEBUG_FS)] 31 + mod entry; 32 + #[cfg(CONFIG_DEBUG_FS)] 33 + use entry::Entry; 34 + 35 + /// Owning handle to a DebugFS directory. 36 + /// 37 + /// The directory in the filesystem represented by [`Dir`] will be removed when handle has been 38 + /// dropped *and* all children have been removed. 39 + // If we have a parent, we hold a reference to it in the `Entry`. This prevents the `dentry` 40 + // we point to from being cleaned up if our parent `Dir`/`Entry` is dropped before us. 41 + // 42 + // The `None` option indicates that the `Arc` could not be allocated, so our children would not be 43 + // able to refer to us. In this case, we need to silently fail. All future child directories/files 44 + // will silently fail as well. 45 + #[derive(Clone)] 46 + pub struct Dir(#[cfg(CONFIG_DEBUG_FS)] Option<Arc<Entry<'static>>>); 47 + 48 + impl Dir { 49 + /// Create a new directory in DebugFS. If `parent` is [`None`], it will be created at the root. 
50 + fn create(name: &CStr, parent: Option<&Dir>) -> Self { 51 + #[cfg(CONFIG_DEBUG_FS)] 52 + { 53 + let parent_entry = match parent { 54 + // If the parent couldn't be allocated, just early-return 55 + Some(Dir(None)) => return Self(None), 56 + Some(Dir(Some(entry))) => Some(entry.clone()), 57 + None => None, 58 + }; 59 + Self( 60 + // If Arc creation fails, the `Entry` will be dropped, so the directory will be 61 + // cleaned up. 62 + Arc::new(Entry::dynamic_dir(name, parent_entry), GFP_KERNEL).ok(), 63 + ) 64 + } 65 + #[cfg(not(CONFIG_DEBUG_FS))] 66 + Self() 67 + } 68 + 69 + /// Creates a DebugFS file which will own the data produced by the initializer provided in 70 + /// `data`. 71 + fn create_file<'a, T, E: 'a>( 72 + &'a self, 73 + name: &'a CStr, 74 + data: impl PinInit<T, E> + 'a, 75 + file_ops: &'static FileOps<T>, 76 + ) -> impl PinInit<File<T>, E> + 'a 77 + where 78 + T: Sync + 'static, 79 + { 80 + let scope = Scope::<T>::new(data, move |data| { 81 + #[cfg(CONFIG_DEBUG_FS)] 82 + if let Some(parent) = &self.0 { 83 + // SAFETY: Because data derives from a scope, and our entry will be dropped before 84 + // the data is dropped, it is guaranteed to outlive the entry we return. 85 + unsafe { Entry::dynamic_file(name, parent.clone(), data, file_ops) } 86 + } else { 87 + Entry::empty() 88 + } 89 + }); 90 + try_pin_init! { 91 + File { 92 + scope <- scope 93 + } ? E 94 + } 95 + } 96 + 97 + /// Create a new directory in DebugFS at the root. 98 + /// 99 + /// # Examples 100 + /// 101 + /// ``` 102 + /// # use kernel::c_str; 103 + /// # use kernel::debugfs::Dir; 104 + /// let debugfs = Dir::new(c_str!("parent")); 105 + /// ``` 106 + pub fn new(name: &CStr) -> Self { 107 + Dir::create(name, None) 108 + } 109 + 110 + /// Creates a subdirectory within this directory. 
111 + /// 112 + /// # Examples 113 + /// 114 + /// ``` 115 + /// # use kernel::c_str; 116 + /// # use kernel::debugfs::Dir; 117 + /// let parent = Dir::new(c_str!("parent")); 118 + /// let child = parent.subdir(c_str!("child")); 119 + /// ``` 120 + pub fn subdir(&self, name: &CStr) -> Self { 121 + Dir::create(name, Some(self)) 122 + } 123 + 124 + /// Creates a read-only file in this directory. 125 + /// 126 + /// The file's contents are produced by invoking [`Writer::write`] on the value initialized by 127 + /// `data`. 128 + /// 129 + /// # Examples 130 + /// 131 + /// ``` 132 + /// # use kernel::c_str; 133 + /// # use kernel::debugfs::Dir; 134 + /// # use kernel::prelude::*; 135 + /// # let dir = Dir::new(c_str!("my_debugfs_dir")); 136 + /// let file = KBox::pin_init(dir.read_only_file(c_str!("foo"), 200), GFP_KERNEL)?; 137 + /// // "my_debugfs_dir/foo" now contains the number 200. 138 + /// // The file is removed when `file` is dropped. 139 + /// # Ok::<(), Error>(()) 140 + /// ``` 141 + pub fn read_only_file<'a, T, E: 'a>( 142 + &'a self, 143 + name: &'a CStr, 144 + data: impl PinInit<T, E> + 'a, 145 + ) -> impl PinInit<File<T>, E> + 'a 146 + where 147 + T: Writer + Send + Sync + 'static, 148 + { 149 + let file_ops = &<T as ReadFile<_>>::FILE_OPS; 150 + self.create_file(name, data, file_ops) 151 + } 152 + 153 + /// Creates a read-only file in this directory, with contents from a callback. 154 + /// 155 + /// `f` must be a function item or a non-capturing closure. 156 + /// This is statically asserted and not a safety requirement. 
157 + /// 158 + /// # Examples 159 + /// 160 + /// ``` 161 + /// # use core::sync::atomic::{AtomicU32, Ordering}; 162 + /// # use kernel::c_str; 163 + /// # use kernel::debugfs::Dir; 164 + /// # use kernel::prelude::*; 165 + /// # let dir = Dir::new(c_str!("foo")); 166 + /// let file = KBox::pin_init( 167 + /// dir.read_callback_file(c_str!("bar"), 168 + /// AtomicU32::new(3), 169 + /// &|val, f| { 170 + /// let out = val.load(Ordering::Relaxed); 171 + /// writeln!(f, "{out:#010x}") 172 + /// }), 173 + /// GFP_KERNEL)?; 174 + /// // Reading "foo/bar" will show "0x00000003". 175 + /// file.store(10, Ordering::Relaxed); 176 + /// // Reading "foo/bar" will now show "0x0000000a". 177 + /// # Ok::<(), Error>(()) 178 + /// ``` 179 + pub fn read_callback_file<'a, T, E: 'a, F>( 180 + &'a self, 181 + name: &'a CStr, 182 + data: impl PinInit<T, E> + 'a, 183 + _f: &'static F, 184 + ) -> impl PinInit<File<T>, E> + 'a 185 + where 186 + T: Send + Sync + 'static, 187 + F: Fn(&T, &mut fmt::Formatter<'_>) -> fmt::Result + Send + Sync, 188 + { 189 + let file_ops = <FormatAdapter<T, F>>::FILE_OPS.adapt(); 190 + self.create_file(name, data, file_ops) 191 + } 192 + 193 + /// Creates a read-write file in this directory. 194 + /// 195 + /// Reading the file uses the [`Writer`] implementation. 196 + /// Writing to the file uses the [`Reader`] implementation. 197 + pub fn read_write_file<'a, T, E: 'a>( 198 + &'a self, 199 + name: &'a CStr, 200 + data: impl PinInit<T, E> + 'a, 201 + ) -> impl PinInit<File<T>, E> + 'a 202 + where 203 + T: Writer + Reader + Send + Sync + 'static, 204 + { 205 + let file_ops = &<T as ReadWriteFile<_>>::FILE_OPS; 206 + self.create_file(name, data, file_ops) 207 + } 208 + 209 + /// Creates a read-write file in this directory, with logic from callbacks. 210 + /// 211 + /// Reading from the file is handled by `f`. Writing to the file is handled by `w`. 212 + /// 213 + /// `f` and `w` must be function items or non-capturing closures. 
214 + /// This is statically asserted and not a safety requirement. 215 + pub fn read_write_callback_file<'a, T, E: 'a, F, W>( 216 + &'a self, 217 + name: &'a CStr, 218 + data: impl PinInit<T, E> + 'a, 219 + _f: &'static F, 220 + _w: &'static W, 221 + ) -> impl PinInit<File<T>, E> + 'a 222 + where 223 + T: Send + Sync + 'static, 224 + F: Fn(&T, &mut fmt::Formatter<'_>) -> fmt::Result + Send + Sync, 225 + W: Fn(&T, &mut UserSliceReader) -> Result + Send + Sync, 226 + { 227 + let file_ops = 228 + <WritableAdapter<FormatAdapter<T, F>, W> as file_ops::ReadWriteFile<_>>::FILE_OPS 229 + .adapt() 230 + .adapt(); 231 + self.create_file(name, data, file_ops) 232 + } 233 + 234 + /// Creates a write-only file in this directory. 235 + /// 236 + /// The file owns its backing data. Writing to the file uses the [`Reader`] 237 + /// implementation. 238 + /// 239 + /// The file is removed when the returned [`File`] is dropped. 240 + pub fn write_only_file<'a, T, E: 'a>( 241 + &'a self, 242 + name: &'a CStr, 243 + data: impl PinInit<T, E> + 'a, 244 + ) -> impl PinInit<File<T>, E> + 'a 245 + where 246 + T: Reader + Send + Sync + 'static, 247 + { 248 + self.create_file(name, data, &T::FILE_OPS) 249 + } 250 + 251 + /// Creates a write-only file in this directory, with write logic from a callback. 252 + /// 253 + /// `w` must be a function item or a non-capturing closure. 254 + /// This is statically asserted and not a safety requirement. 
255 + pub fn write_callback_file<'a, T, E: 'a, W>( 256 + &'a self, 257 + name: &'a CStr, 258 + data: impl PinInit<T, E> + 'a, 259 + _w: &'static W, 260 + ) -> impl PinInit<File<T>, E> + 'a 261 + where 262 + T: Send + Sync + 'static, 263 + W: Fn(&T, &mut UserSliceReader) -> Result + Send + Sync, 264 + { 265 + let file_ops = <WritableAdapter<NoWriter<T>, W> as WriteFile<_>>::FILE_OPS 266 + .adapt() 267 + .adapt(); 268 + self.create_file(name, data, file_ops) 269 + } 270 + 271 + // While this function is safe, it is intentionally not public because it's a bit of a 272 + // footgun. 273 + // 274 + // Unless you also extract the `entry` later and schedule it for `Drop` at the appropriate 275 + // time, a `ScopedDir` with a `Dir` parent will never be deleted. 276 + fn scoped_dir<'data>(&self, name: &CStr) -> ScopedDir<'data, 'static> { 277 + #[cfg(CONFIG_DEBUG_FS)] 278 + { 279 + let parent_entry = match &self.0 { 280 + None => return ScopedDir::empty(), 281 + Some(entry) => entry.clone(), 282 + }; 283 + ScopedDir { 284 + entry: ManuallyDrop::new(Entry::dynamic_dir(name, Some(parent_entry))), 285 + _phantom: PhantomData, 286 + } 287 + } 288 + #[cfg(not(CONFIG_DEBUG_FS))] 289 + ScopedDir::empty() 290 + } 291 + 292 + /// Creates a new scope, which is a directory associated with some data `T`. 293 + /// 294 + /// The created directory will be a subdirectory of `self`. The `init` closure is called to 295 + /// populate the directory with files and subdirectories. These files can reference the data 296 + /// stored in the scope. 297 + /// 298 + /// The entire directory tree created within the scope will be removed when the returned 299 + /// `Scope` handle is dropped. 
300 + pub fn scope<'a, T: 'a, E: 'a, F>( 301 + &'a self, 302 + data: impl PinInit<T, E> + 'a, 303 + name: &'a CStr, 304 + init: F, 305 + ) -> impl PinInit<Scope<T>, E> + 'a 306 + where 307 + F: for<'data, 'dir> FnOnce(&'data T, &'dir ScopedDir<'data, 'dir>) + 'a, 308 + { 309 + Scope::new(data, |data| { 310 + let scoped = self.scoped_dir(name); 311 + init(data, &scoped); 312 + scoped.into_entry() 313 + }) 314 + } 315 + } 316 + 317 + #[pin_data] 318 + /// Handle to a DebugFS scope, which ensures that attached `data` will outlive the DebugFS entry 319 + /// without moving. 320 + /// 321 + /// This is internally used to back [`File`], and used in the API to represent the attachment 322 + /// of a directory lifetime to a data structure which may be jointly accessed by a number of 323 + /// different files. 324 + /// 325 + /// When dropped, a `Scope` will remove all directories and files in the filesystem backed by the 326 + /// attached data structure prior to releasing the attached data. 327 + pub struct Scope<T> { 328 + // This order is load-bearing for drops - `_entry` must be dropped before `data`. 329 + #[cfg(CONFIG_DEBUG_FS)] 330 + _entry: Entry<'static>, 331 + #[pin] 332 + data: T, 333 + // Even if `T` is `Unpin`, we still can't allow it to be moved. 334 + #[pin] 335 + _pin: PhantomPinned, 336 + } 337 + 338 + #[pin_data] 339 + /// Handle to a DebugFS file, owning its backing data. 340 + /// 341 + /// When dropped, the DebugFS file will be removed and the attached data will be dropped. 342 + pub struct File<T> { 343 + #[pin] 344 + scope: Scope<T>, 345 + } 346 + 347 + #[cfg(not(CONFIG_DEBUG_FS))] 348 + impl<'b, T: 'b> Scope<T> { 349 + fn new<E: 'b, F>(data: impl PinInit<T, E> + 'b, init: F) -> impl PinInit<Self, E> + 'b 350 + where 351 + F: for<'a> FnOnce(&'a T) + 'b, 352 + { 353 + try_pin_init! { 354 + Self { 355 + data <- data, 356 + _pin: PhantomPinned 357 + } ? 
E 358 + } 359 + .pin_chain(|scope| { 360 + init(&scope.data); 361 + Ok(()) 362 + }) 363 + } 364 + } 365 + 366 + #[cfg(CONFIG_DEBUG_FS)] 367 + impl<'b, T: 'b> Scope<T> { 368 + fn entry_mut(self: Pin<&mut Self>) -> &mut Entry<'static> { 369 + // SAFETY: _entry is not structurally pinned. 370 + unsafe { &mut Pin::into_inner_unchecked(self)._entry } 371 + } 372 + 373 + fn new<E: 'b, F>(data: impl PinInit<T, E> + 'b, init: F) -> impl PinInit<Self, E> + 'b 374 + where 375 + F: for<'a> FnOnce(&'a T) -> Entry<'static> + 'b, 376 + { 377 + try_pin_init! { 378 + Self { 379 + _entry: Entry::empty(), 380 + data <- data, 381 + _pin: PhantomPinned 382 + } ? E 383 + } 384 + .pin_chain(|scope| { 385 + *scope.entry_mut() = init(&scope.data); 386 + Ok(()) 387 + }) 388 + } 389 + } 390 + 391 + impl<'a, T: 'a> Scope<T> { 392 + /// Creates a new scope, which is a directory at the root of the debugfs filesystem, 393 + /// associated with some data `T`. 394 + /// 395 + /// The `init` closure is called to populate the directory with files and subdirectories. These 396 + /// files can reference the data stored in the scope. 397 + /// 398 + /// The entire directory tree created within the scope will be removed when the returned 399 + /// `Scope` handle is dropped. 
400 + pub fn dir<E: 'a, F>( 401 + data: impl PinInit<T, E> + 'a, 402 + name: &'a CStr, 403 + init: F, 404 + ) -> impl PinInit<Self, E> + 'a 405 + where 406 + F: for<'data, 'dir> FnOnce(&'data T, &'dir ScopedDir<'data, 'dir>) + 'a, 407 + { 408 + Scope::new(data, |data| { 409 + let scoped = ScopedDir::new(name); 410 + init(data, &scoped); 411 + scoped.into_entry() 412 + }) 413 + } 414 + } 415 + 416 + impl<T> Deref for Scope<T> { 417 + type Target = T; 418 + fn deref(&self) -> &T { 419 + &self.data 420 + } 421 + } 422 + 423 + impl<T> Deref for File<T> { 424 + type Target = T; 425 + fn deref(&self) -> &T { 426 + &self.scope 427 + } 428 + } 429 + 430 + /// A handle to a directory which will live at most `'dir`, accessing data that will live for at 431 + /// least `'data`. 432 + /// 433 + /// Dropping a ScopedDir will not delete or clean it up, this is expected to occur through dropping 434 + /// the `Scope` that created it. 435 + pub struct ScopedDir<'data, 'dir> { 436 + #[cfg(CONFIG_DEBUG_FS)] 437 + entry: ManuallyDrop<Entry<'dir>>, 438 + _phantom: PhantomData<fn(&'data ()) -> &'dir ()>, 439 + } 440 + 441 + impl<'data, 'dir> ScopedDir<'data, 'dir> { 442 + /// Creates a subdirectory inside this `ScopedDir`. 443 + /// 444 + /// The returned directory handle cannot outlive this one. 445 + pub fn dir<'dir2>(&'dir2 self, name: &CStr) -> ScopedDir<'data, 'dir2> { 446 + #[cfg(not(CONFIG_DEBUG_FS))] 447 + let _ = name; 448 + ScopedDir { 449 + #[cfg(CONFIG_DEBUG_FS)] 450 + entry: ManuallyDrop::new(Entry::dir(name, Some(&*self.entry))), 451 + _phantom: PhantomData, 452 + } 453 + } 454 + 455 + fn create_file<T: Sync>(&self, name: &CStr, data: &'data T, vtable: &'static FileOps<T>) { 456 + #[cfg(CONFIG_DEBUG_FS)] 457 + core::mem::forget(Entry::file(name, &self.entry, data, vtable)); 458 + } 459 + 460 + /// Creates a read-only file in this directory. 461 + /// 462 + /// The file's contents are produced by invoking [`Writer::write`]. 
463 + /// 464 + /// This function does not produce an owning handle to the file. The created 465 + /// file is removed when the [`Scope`] that this directory belongs 466 + /// to is dropped. 467 + pub fn read_only_file<T: Writer + Send + Sync + 'static>(&self, name: &CStr, data: &'data T) { 468 + self.create_file(name, data, &T::FILE_OPS) 469 + } 470 + 471 + /// Creates a read-only file in this directory, with contents from a callback. 472 + /// 473 + /// The file contents are generated by calling `f` with `data`. 474 + /// 475 + /// 476 + /// `f` must be a function item or a non-capturing closure. 477 + /// This is statically asserted and not a safety requirement. 478 + /// 479 + /// This function does not produce an owning handle to the file. The created 480 + /// file is removed when the [`Scope`] that this directory belongs 481 + /// to is dropped. 482 + pub fn read_callback_file<T, F>(&self, name: &CStr, data: &'data T, _f: &'static F) 483 + where 484 + T: Send + Sync + 'static, 485 + F: Fn(&T, &mut fmt::Formatter<'_>) -> fmt::Result + Send + Sync, 486 + { 487 + let vtable = <FormatAdapter<T, F> as ReadFile<_>>::FILE_OPS.adapt(); 488 + self.create_file(name, data, vtable) 489 + } 490 + 491 + /// Creates a read-write file in this directory. 492 + /// 493 + /// Reading the file uses the [`Writer`] implementation on `data`. Writing to the file uses 494 + /// the [`Reader`] implementation on `data`. 495 + /// 496 + /// This function does not produce an owning handle to the file. The created 497 + /// file is removed when the [`Scope`] that this directory belongs 498 + /// to is dropped. 499 + pub fn read_write_file<T: Writer + Reader + Send + Sync + 'static>( 500 + &self, 501 + name: &CStr, 502 + data: &'data T, 503 + ) { 504 + let vtable = &<T as ReadWriteFile<_>>::FILE_OPS; 505 + self.create_file(name, data, vtable) 506 + } 507 + 508 + /// Creates a read-write file in this directory, with logic from callbacks. 
509 + /// 510 + /// Reading from the file is handled by `f`. Writing to the file is handled by `w`. 511 + /// 512 + /// `f` and `w` must be function items or non-capturing closures. 513 + /// This is statically asserted and not a safety requirement. 514 + /// 515 + /// This function does not produce an owning handle to the file. The created 516 + /// file is removed when the [`Scope`] that this directory belongs 517 + /// to is dropped. 518 + pub fn read_write_callback_file<T, F, W>( 519 + &self, 520 + name: &CStr, 521 + data: &'data T, 522 + _f: &'static F, 523 + _w: &'static W, 524 + ) where 525 + T: Send + Sync + 'static, 526 + F: Fn(&T, &mut fmt::Formatter<'_>) -> fmt::Result + Send + Sync, 527 + W: Fn(&T, &mut UserSliceReader) -> Result + Send + Sync, 528 + { 529 + let vtable = <WritableAdapter<FormatAdapter<T, F>, W> as ReadWriteFile<_>>::FILE_OPS 530 + .adapt() 531 + .adapt(); 532 + self.create_file(name, data, vtable) 533 + } 534 + 535 + /// Creates a write-only file in this directory. 536 + /// 537 + /// Writing to the file uses the [`Reader`] implementation on `data`. 538 + /// 539 + /// This function does not produce an owning handle to the file. The created 540 + /// file is removed when the [`Scope`] that this directory belongs 541 + /// to is dropped. 542 + pub fn write_only_file<T: Reader + Send + Sync + 'static>(&self, name: &CStr, data: &'data T) { 543 + let vtable = &<T as WriteFile<_>>::FILE_OPS; 544 + self.create_file(name, data, vtable) 545 + } 546 + 547 + /// Creates a write-only file in this directory, with write logic from a callback. 548 + /// 549 + /// Writing to the file is handled by `w`. 550 + /// 551 + /// `w` must be a function item or a non-capturing closure. 552 + /// This is statically asserted and not a safety requirement. 553 + /// 554 + /// This function does not produce an owning handle to the file. The created 555 + /// file is removed when the [`Scope`] that this directory belongs 556 + /// to is dropped. 
557 + pub fn write_only_callback_file<T, W>(&self, name: &CStr, data: &'data T, _w: &'static W) 558 + where 559 + T: Send + Sync + 'static, 560 + W: Fn(&T, &mut UserSliceReader) -> Result + Send + Sync, 561 + { 562 + let vtable = &<WritableAdapter<NoWriter<T>, W> as WriteFile<_>>::FILE_OPS 563 + .adapt() 564 + .adapt(); 565 + self.create_file(name, data, vtable) 566 + } 567 + 568 + fn empty() -> Self { 569 + ScopedDir { 570 + #[cfg(CONFIG_DEBUG_FS)] 571 + entry: ManuallyDrop::new(Entry::empty()), 572 + _phantom: PhantomData, 573 + } 574 + } 575 + #[cfg(CONFIG_DEBUG_FS)] 576 + fn into_entry(self) -> Entry<'dir> { 577 + ManuallyDrop::into_inner(self.entry) 578 + } 579 + #[cfg(not(CONFIG_DEBUG_FS))] 580 + fn into_entry(self) {} 581 + } 582 + 583 + impl<'data> ScopedDir<'data, 'static> { 584 + // This is safe, but intentionally not exported due to footgun status. A ScopedDir with no 585 + // parent will never be released by default, and needs to have its entry extracted and used 586 + // somewhere. 587 + fn new(name: &CStr) -> ScopedDir<'data, 'static> { 588 + ScopedDir { 589 + #[cfg(CONFIG_DEBUG_FS)] 590 + entry: ManuallyDrop::new(Entry::dir(name, None)), 591 + _phantom: PhantomData, 592 + } 593 + } 594 + }
+122
rust/kernel/debugfs/callback_adapters.rs
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + // Copyright (C) 2025 Google LLC. 3 + 4 + //! Adapters which allow the user to supply a write or read implementation as a value rather 5 + //! than a trait implementation. If provided, it will override the trait implementation. 6 + 7 + use super::{Reader, Writer}; 8 + use crate::prelude::*; 9 + use crate::uaccess::UserSliceReader; 10 + use core::fmt; 11 + use core::fmt::Formatter; 12 + use core::marker::PhantomData; 13 + use core::ops::Deref; 14 + 15 + /// # Safety 16 + /// 17 + /// To implement this trait, it must be safe to cast a `&Self` to a `&Inner`. 18 + /// It is intended for use in unstacking adapters out of `FileOps` backings. 19 + pub(crate) unsafe trait Adapter { 20 + type Inner; 21 + } 22 + 23 + /// Adapter to implement `Reader` via a callback with the same representation as `T`. 24 + /// 25 + /// * Layer it on top of `FormatAdapter` if you want to add a custom callback for `write`. 26 + /// * Layer it on top of `NoWriter` to pass through any support present on the underlying type. 27 + /// 28 + /// # Invariants 29 + /// 30 + /// If an instance for `WritableAdapter<_, W>` is constructed, `W` is inhabited. 
31 + #[repr(transparent)] 32 + pub(crate) struct WritableAdapter<D, W> { 33 + inner: D, 34 + _writer: PhantomData<W>, 35 + } 36 + 37 + // SAFETY: Stripping off the adapter only removes constraints 38 + unsafe impl<D, W> Adapter for WritableAdapter<D, W> { 39 + type Inner = D; 40 + } 41 + 42 + impl<D: Writer, W> Writer for WritableAdapter<D, W> { 43 + fn write(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { 44 + self.inner.write(fmt) 45 + } 46 + } 47 + 48 + impl<D: Deref, W> Reader for WritableAdapter<D, W> 49 + where 50 + W: Fn(&D::Target, &mut UserSliceReader) -> Result + Send + Sync + 'static, 51 + { 52 + fn read_from_slice(&self, reader: &mut UserSliceReader) -> Result { 53 + // SAFETY: WritableAdapter<_, W> can only be constructed if W is inhabited 54 + let w: &W = unsafe { materialize_zst() }; 55 + w(self.inner.deref(), reader) 56 + } 57 + } 58 + 59 + /// Adapter to implement `Writer` via a callback with the same representation as `T`. 60 + /// 61 + /// # Invariants 62 + /// 63 + /// If an instance for `FormatAdapter<_, F>` is constructed, `F` is inhabited. 
64 + #[repr(transparent)] 65 + pub(crate) struct FormatAdapter<D, F> { 66 + inner: D, 67 + _formatter: PhantomData<F>, 68 + } 69 + 70 + impl<D, F> Deref for FormatAdapter<D, F> { 71 + type Target = D; 72 + fn deref(&self) -> &D { 73 + &self.inner 74 + } 75 + } 76 + 77 + impl<D, F> Writer for FormatAdapter<D, F> 78 + where 79 + F: Fn(&D, &mut Formatter<'_>) -> fmt::Result + 'static, 80 + { 81 + fn write(&self, fmt: &mut Formatter<'_>) -> fmt::Result { 82 + // SAFETY: FormatAdapter<_, F> can only be constructed if F is inhabited 83 + let f: &F = unsafe { materialize_zst() }; 84 + f(&self.inner, fmt) 85 + } 86 + } 87 + 88 + // SAFETY: Stripping off the adapter only removes constraints 89 + unsafe impl<D, F> Adapter for FormatAdapter<D, F> { 90 + type Inner = D; 91 + } 92 + 93 + #[repr(transparent)] 94 + pub(crate) struct NoWriter<D> { 95 + inner: D, 96 + } 97 + 98 + // SAFETY: Stripping off the adapter only removes constraints 99 + unsafe impl<D> Adapter for NoWriter<D> { 100 + type Inner = D; 101 + } 102 + 103 + impl<D> Deref for NoWriter<D> { 104 + type Target = D; 105 + fn deref(&self) -> &D { 106 + &self.inner 107 + } 108 + } 109 + 110 + /// For types with a unique value, produce a static reference to it. 111 + /// 112 + /// # Safety 113 + /// 114 + /// The caller asserts that F is inhabited 115 + unsafe fn materialize_zst<F>() -> &'static F { 116 + const { assert!(core::mem::size_of::<F>() == 0) }; 117 + let zst_dangle: core::ptr::NonNull<F> = core::ptr::NonNull::dangling(); 118 + // SAFETY: While the pointer is dangling, it is a dangling pointer to a ZST, based on the 119 + // assertion above. The type is also inhabited, by the caller's assertion. This means 120 + // we can materialize it. 121 + unsafe { zst_dangle.as_ref() } 122 + }
+164
rust/kernel/debugfs/entry.rs
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + // Copyright (C) 2025 Google LLC. 3 + 4 + use crate::debugfs::file_ops::FileOps; 5 + use crate::ffi::c_void; 6 + use crate::str::CStr; 7 + use crate::sync::Arc; 8 + use core::marker::PhantomData; 9 + 10 + /// Owning handle to a DebugFS entry. 11 + /// 12 + /// # Invariants 13 + /// 14 + /// The wrapped pointer will always be `NULL`, an error, or an owned DebugFS `dentry`. 15 + pub(crate) struct Entry<'a> { 16 + entry: *mut bindings::dentry, 17 + // If we were created with an owning parent, this is the keep-alive 18 + _parent: Option<Arc<Entry<'static>>>, 19 + // If we were created with a non-owning parent, this prevents us from outliving it 20 + _phantom: PhantomData<&'a ()>, 21 + } 22 + 23 + // SAFETY: [`Entry`] is just a `dentry` under the hood, which the API promises can be transferred 24 + // between threads. 25 + unsafe impl Send for Entry<'_> {} 26 + 27 + // SAFETY: All the C functions we call on the `dentry` pointer are threadsafe. 28 + unsafe impl Sync for Entry<'_> {} 29 + 30 + impl Entry<'static> { 31 + pub(crate) fn dynamic_dir(name: &CStr, parent: Option<Arc<Self>>) -> Self { 32 + let parent_ptr = match &parent { 33 + Some(entry) => entry.as_ptr(), 34 + None => core::ptr::null_mut(), 35 + }; 36 + // SAFETY: The invariants of this function's arguments ensure the safety of this call. 37 + // * `name` is a valid C string by the invariants of `&CStr`. 38 + // * `parent_ptr` is either `NULL` (if `parent` is `None`), or a pointer to a valid 39 + // `dentry` by our invariant. `debugfs_create_dir` handles `NULL` pointers correctly. 40 + let entry = unsafe { bindings::debugfs_create_dir(name.as_char_ptr(), parent_ptr) }; 41 + 42 + Entry { 43 + entry, 44 + _parent: parent, 45 + _phantom: PhantomData, 46 + } 47 + } 48 + 49 + /// # Safety 50 + /// 51 + /// * `data` must outlive the returned `Entry`. 
52 + pub(crate) unsafe fn dynamic_file<T>( 53 + name: &CStr, 54 + parent: Arc<Self>, 55 + data: &T, 56 + file_ops: &'static FileOps<T>, 57 + ) -> Self { 58 + // SAFETY: The invariants of this function's arguments ensure the safety of this call. 59 + // * `name` is a valid C string by the invariants of `&CStr`. 60 + // * `parent.as_ptr()` is a pointer to a valid `dentry` by invariant. 61 + // * The caller guarantees that `data` will outlive the returned `Entry`. 62 + // * The guarantees on `FileOps` assert the vtable will be compatible with the data we have 63 + // provided. 64 + let entry = unsafe { 65 + bindings::debugfs_create_file_full( 66 + name.as_char_ptr(), 67 + file_ops.mode(), 68 + parent.as_ptr(), 69 + core::ptr::from_ref(data) as *mut c_void, 70 + core::ptr::null(), 71 + &**file_ops, 72 + ) 73 + }; 74 + 75 + Entry { 76 + entry, 77 + _parent: Some(parent), 78 + _phantom: PhantomData, 79 + } 80 + } 81 + } 82 + 83 + impl<'a> Entry<'a> { 84 + pub(crate) fn dir(name: &CStr, parent: Option<&'a Entry<'_>>) -> Self { 85 + let parent_ptr = match &parent { 86 + Some(entry) => entry.as_ptr(), 87 + None => core::ptr::null_mut(), 88 + }; 89 + // SAFETY: The invariants of this function's arguments ensure the safety of this call. 90 + // * `name` is a valid C string by the invariants of `&CStr`. 91 + // * `parent_ptr` is either `NULL` (if `parent` is `None`), or a pointer to a valid 92 + // `dentry` (because `parent` is a valid reference to an `Entry`). The lifetime `'a` 93 + // ensures that the parent outlives this entry. 94 + let entry = unsafe { bindings::debugfs_create_dir(name.as_char_ptr(), parent_ptr) }; 95 + 96 + Entry { 97 + entry, 98 + _parent: None, 99 + _phantom: PhantomData, 100 + } 101 + } 102 + 103 + pub(crate) fn file<T>( 104 + name: &CStr, 105 + parent: &'a Entry<'_>, 106 + data: &'a T, 107 + file_ops: &FileOps<T>, 108 + ) -> Self { 109 + // SAFETY: The invariants of this function's arguments ensure the safety of this call. 
110 + // * `name` is a valid C string by the invariants of `&CStr`. 111 + // * `parent.as_ptr()` is a pointer to a valid `dentry` because we have `&'a Entry`. 112 + // * `data` is a valid pointer to `T` for lifetime `'a`. 113 + // * The returned `Entry` has lifetime `'a`, so it cannot outlive `parent` or `data`. 114 + // * The caller guarantees that `file_ops` is compatible with `data`. 115 + // * The guarantees on `FileOps` assert the vtable will be compatible with the data we have 116 + // provided. 117 + let entry = unsafe { 118 + bindings::debugfs_create_file_full( 119 + name.as_char_ptr(), 120 + file_ops.mode(), 121 + parent.as_ptr(), 122 + core::ptr::from_ref(data) as *mut c_void, 123 + core::ptr::null(), 124 + &**file_ops, 125 + ) 126 + }; 127 + 128 + Entry { 129 + entry, 130 + _parent: None, 131 + _phantom: PhantomData, 132 + } 133 + } 134 + } 135 + 136 + impl Entry<'_> { 137 + /// Constructs a placeholder DebugFS [`Entry`]. 138 + pub(crate) fn empty() -> Self { 139 + Self { 140 + entry: core::ptr::null_mut(), 141 + _parent: None, 142 + _phantom: PhantomData, 143 + } 144 + } 145 + 146 + /// Returns the pointer representation of the DebugFS directory. 147 + /// 148 + /// # Guarantees 149 + /// 150 + /// Due to the type invariant, the value returned from this function will always be an error 151 + /// code, NULL, or a live DebugFS directory. If it is live, it will remain live at least as 152 + /// long as this entry lives. 153 + pub(crate) fn as_ptr(&self) -> *mut bindings::dentry { 154 + self.entry 155 + } 156 + } 157 + 158 + impl Drop for Entry<'_> { 159 + fn drop(&mut self) { 160 + // SAFETY: `debugfs_remove` can take `NULL`, error values, and legal DebugFS dentries. 161 + // `as_ptr` guarantees that the pointer is of this form. 162 + unsafe { bindings::debugfs_remove(self.as_ptr()) } 163 + } 164 + }
+247
rust/kernel/debugfs/file_ops.rs
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + // Copyright (C) 2025 Google LLC. 3 + 4 + use super::{Reader, Writer}; 5 + use crate::debugfs::callback_adapters::Adapter; 6 + use crate::prelude::*; 7 + use crate::seq_file::SeqFile; 8 + use crate::seq_print; 9 + use crate::uaccess::UserSlice; 10 + use core::fmt::{Display, Formatter, Result}; 11 + use core::marker::PhantomData; 12 + 13 + #[cfg(CONFIG_DEBUG_FS)] 14 + use core::ops::Deref; 15 + 16 + /// # Invariant 17 + /// 18 + /// `FileOps<T>` will always contain an `operations` which is safe to use for a file backed 19 + /// off an inode which has a pointer to a `T` in its private data that is safe to convert 20 + /// into a reference. 21 + pub(super) struct FileOps<T> { 22 + #[cfg(CONFIG_DEBUG_FS)] 23 + operations: bindings::file_operations, 24 + #[cfg(CONFIG_DEBUG_FS)] 25 + mode: u16, 26 + _phantom: PhantomData<T>, 27 + } 28 + 29 + impl<T> FileOps<T> { 30 + /// # Safety 31 + /// 32 + /// The caller asserts that the provided `operations` is safe to use for a file whose 33 + /// inode has a pointer to `T` in its private data that is safe to convert into a reference. 34 + const unsafe fn new(operations: bindings::file_operations, mode: u16) -> Self { 35 + Self { 36 + #[cfg(CONFIG_DEBUG_FS)] 37 + operations, 38 + #[cfg(CONFIG_DEBUG_FS)] 39 + mode, 40 + _phantom: PhantomData, 41 + } 42 + } 43 + 44 + #[cfg(CONFIG_DEBUG_FS)] 45 + pub(crate) const fn mode(&self) -> u16 { 46 + self.mode 47 + } 48 + } 49 + 50 + impl<T: Adapter> FileOps<T> { 51 + pub(super) const fn adapt(&self) -> &FileOps<T::Inner> { 52 + // SAFETY: `Adapter` asserts that `T` can be legally cast to `T::Inner`. 
53 + unsafe { core::mem::transmute(self) } 54 + } 55 + } 56 + 57 + #[cfg(CONFIG_DEBUG_FS)] 58 + impl<T> Deref for FileOps<T> { 59 + type Target = bindings::file_operations; 60 + 61 + fn deref(&self) -> &Self::Target { 62 + &self.operations 63 + } 64 + } 65 + 66 + struct WriterAdapter<T>(T); 67 + 68 + impl<'a, T: Writer> Display for WriterAdapter<&'a T> { 69 + fn fmt(&self, f: &mut Formatter<'_>) -> Result { 70 + self.0.write(f) 71 + } 72 + } 73 + 74 + /// Implements `open` for `file_operations` via `single_open` to fill out a `seq_file`. 75 + /// 76 + /// # Safety 77 + /// 78 + /// * `inode`'s private pointer must point to a value of type `T` which will outlive the `inode` 79 + /// and will not have any unique references alias it during the call. 80 + /// * `file` must point to a live, not-yet-initialized file object. 81 + unsafe extern "C" fn writer_open<T: Writer + Sync>( 82 + inode: *mut bindings::inode, 83 + file: *mut bindings::file, 84 + ) -> c_int { 85 + // SAFETY: The caller ensures that `inode` is a valid pointer. 86 + let data = unsafe { (*inode).i_private }; 87 + // SAFETY: 88 + // * `file` is acceptable by caller precondition. 89 + // * `print_act` will be called on a `seq_file` with private data set to the third argument, 90 + // so we meet its safety requirements. 91 + // * The `data` pointer passed in the third argument is a valid `T` pointer that outlives 92 + // this call by caller preconditions. 93 + unsafe { bindings::single_open(file, Some(writer_act::<T>), data) } 94 + } 95 + 96 + /// Prints private data stashed in a seq_file to that seq file. 97 + /// 98 + /// # Safety 99 + /// 100 + /// `seq` must point to a live `seq_file` whose private data is a valid pointer to a `T` which may 101 + /// not have any unique references alias it during the call. 
102 + unsafe extern "C" fn writer_act<T: Writer + Sync>( 103 + seq: *mut bindings::seq_file, 104 + _: *mut c_void, 105 + ) -> c_int { 106 + // SAFETY: By caller precondition, this pointer is valid pointer to a `T`, and 107 + // there are not and will not be any unique references until we are done. 108 + let data = unsafe { &*((*seq).private.cast::<T>()) }; 109 + // SAFETY: By caller precondition, `seq_file` points to a live `seq_file`, so we can lift 110 + // it. 111 + let seq_file = unsafe { SeqFile::from_raw(seq) }; 112 + seq_print!(seq_file, "{}", WriterAdapter(data)); 113 + 0 114 + } 115 + 116 + // Work around lack of generic const items. 117 + pub(crate) trait ReadFile<T> { 118 + const FILE_OPS: FileOps<T>; 119 + } 120 + 121 + impl<T: Writer + Sync> ReadFile<T> for T { 122 + const FILE_OPS: FileOps<T> = { 123 + let operations = bindings::file_operations { 124 + read: Some(bindings::seq_read), 125 + llseek: Some(bindings::seq_lseek), 126 + release: Some(bindings::single_release), 127 + open: Some(writer_open::<Self>), 128 + // SAFETY: `file_operations` supports zeroes in all fields. 129 + ..unsafe { core::mem::zeroed() } 130 + }; 131 + // SAFETY: `operations` is all stock `seq_file` implementations except for `writer_open`. 132 + // `open`'s only requirement beyond what is provided to all open functions is that the 133 + // inode's data pointer must point to a `T` that will outlive it, which matches the 134 + // `FileOps` requirements. 135 + unsafe { FileOps::new(operations, 0o400) } 136 + }; 137 + } 138 + 139 + fn read<T: Reader + Sync>(data: &T, buf: *const c_char, count: usize) -> isize { 140 + let mut reader = UserSlice::new(UserPtr::from_ptr(buf as *mut c_void), count).reader(); 141 + 142 + if let Err(e) = data.read_from_slice(&mut reader) { 143 + return e.to_errno() as isize; 144 + } 145 + 146 + count as isize 147 + } 148 + 149 + /// # Safety 150 + /// 151 + /// `file` must be a valid pointer to a `file` struct. 
152 + /// The `private_data` of the file must contain a valid pointer to a `seq_file` whose 153 + /// `private` data in turn points to a `T` that implements `Reader`. 154 + /// `buf` must be a valid user-space buffer. 155 + pub(crate) unsafe extern "C" fn write<T: Reader + Sync>( 156 + file: *mut bindings::file, 157 + buf: *const c_char, 158 + count: usize, 159 + _ppos: *mut bindings::loff_t, 160 + ) -> isize { 161 + // SAFETY: The file was opened with `single_open`, which sets `private_data` to a `seq_file`. 162 + let seq = unsafe { &mut *((*file).private_data.cast::<bindings::seq_file>()) }; 163 + // SAFETY: By caller precondition, this pointer is live and points to a value of type `T`. 164 + let data = unsafe { &*(seq.private as *const T) }; 165 + read(data, buf, count) 166 + } 167 + 168 + // A trait to get the file operations for a type. 169 + pub(crate) trait ReadWriteFile<T> { 170 + const FILE_OPS: FileOps<T>; 171 + } 172 + 173 + impl<T: Writer + Reader + Sync> ReadWriteFile<T> for T { 174 + const FILE_OPS: FileOps<T> = { 175 + let operations = bindings::file_operations { 176 + open: Some(writer_open::<T>), 177 + read: Some(bindings::seq_read), 178 + write: Some(write::<T>), 179 + llseek: Some(bindings::seq_lseek), 180 + release: Some(bindings::single_release), 181 + // SAFETY: `file_operations` supports zeroes in all fields. 182 + ..unsafe { core::mem::zeroed() } 183 + }; 184 + // SAFETY: `operations` is all stock `seq_file` implementations except for `writer_open` 185 + // and `write`. 186 + // `writer_open`'s only requirement beyond what is provided to all open functions is that 187 + // the inode's data pointer must point to a `T` that will outlive it, which matches the 188 + // `FileOps` requirements. 189 + // `write` only requires that the file's private data pointer points to `seq_file` 190 + // which points to a `T` that will outlive it, which matches what `writer_open` 191 + // provides. 
192 + unsafe { FileOps::new(operations, 0o600) } 193 + }; 194 + } 195 + 196 + /// # Safety 197 + /// 198 + /// `inode` must be a valid pointer to an `inode` struct. 199 + /// `file` must be a valid pointer to a `file` struct. 200 + unsafe extern "C" fn write_only_open( 201 + inode: *mut bindings::inode, 202 + file: *mut bindings::file, 203 + ) -> c_int { 204 + // SAFETY: The caller ensures that `inode` and `file` are valid pointers. 205 + unsafe { (*file).private_data = (*inode).i_private }; 206 + 0 207 + } 208 + 209 + /// # Safety 210 + /// 211 + /// * `file` must be a valid pointer to a `file` struct. 212 + /// * The `private_data` of the file must contain a valid pointer to a `T` that implements 213 + /// `Reader`. 214 + /// * `buf` must be a valid user-space buffer. 215 + pub(crate) unsafe extern "C" fn write_only_write<T: Reader + Sync>( 216 + file: *mut bindings::file, 217 + buf: *const c_char, 218 + count: usize, 219 + _ppos: *mut bindings::loff_t, 220 + ) -> isize { 221 + // SAFETY: The caller ensures that `file` is a valid pointer and that `private_data` holds a 222 + // valid pointer to `T`. 223 + let data = unsafe { &*((*file).private_data as *const T) }; 224 + read(data, buf, count) 225 + } 226 + 227 + pub(crate) trait WriteFile<T> { 228 + const FILE_OPS: FileOps<T>; 229 + } 230 + 231 + impl<T: Reader + Sync> WriteFile<T> for T { 232 + const FILE_OPS: FileOps<T> = { 233 + let operations = bindings::file_operations { 234 + open: Some(write_only_open), 235 + write: Some(write_only_write::<T>), 236 + llseek: Some(bindings::noop_llseek), 237 + // SAFETY: `file_operations` supports zeroes in all fields. 238 + ..unsafe { core::mem::zeroed() } 239 + }; 240 + // SAFETY: 241 + // * `write_only_open` populates the file private data with the inode private data 242 + // * `write_only_write`'s only requirement is that the private data of the file point to 243 + // a `T` and be legal to convert to a shared reference, which `write_only_open` 244 + // satisfies. 
245 + unsafe { FileOps::new(operations, 0o200) } 246 + }; 247 + }
+102
rust/kernel/debugfs/traits.rs
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + // Copyright (C) 2025 Google LLC. 3 + 4 + //! Traits for rendering or updating values exported to DebugFS. 5 + 6 + use crate::prelude::*; 7 + use crate::sync::Mutex; 8 + use crate::uaccess::UserSliceReader; 9 + use core::fmt::{self, Debug, Formatter}; 10 + use core::str::FromStr; 11 + use core::sync::atomic::{ 12 + AtomicI16, AtomicI32, AtomicI64, AtomicI8, AtomicIsize, AtomicU16, AtomicU32, AtomicU64, 13 + AtomicU8, AtomicUsize, Ordering, 14 + }; 15 + 16 + /// A trait for types that can be written into a string. 17 + /// 18 + /// This works very similarly to `Debug`, and is automatically implemented if `Debug` is 19 + /// implemented for a type. It is also implemented for any writable type inside a `Mutex`. 20 + /// 21 + /// The derived implementation of `Debug` [may 22 + /// change](https://doc.rust-lang.org/std/fmt/trait.Debug.html#stability) 23 + /// between Rust versions, so if stability is key for your use case, please implement `Writer` 24 + /// explicitly instead. 25 + pub trait Writer { 26 + /// Formats the value using the given formatter. 27 + fn write(&self, f: &mut Formatter<'_>) -> fmt::Result; 28 + } 29 + 30 + impl<T: Writer> Writer for Mutex<T> { 31 + fn write(&self, f: &mut Formatter<'_>) -> fmt::Result { 32 + self.lock().write(f) 33 + } 34 + } 35 + 36 + impl<T: Debug> Writer for T { 37 + fn write(&self, f: &mut Formatter<'_>) -> fmt::Result { 38 + writeln!(f, "{self:?}") 39 + } 40 + } 41 + 42 + /// A trait for types that can be updated from a user slice. 43 + /// 44 + /// This works similarly to `FromStr`, but operates on a `UserSliceReader` rather than a &str. 45 + /// 46 + /// It is automatically implemented for all atomic integers, or any type that implements `FromStr` 47 + /// wrapped in a `Mutex`. 48 + pub trait Reader { 49 + /// Updates the value from the given user slice. 
50 + fn read_from_slice(&self, reader: &mut UserSliceReader) -> Result; 51 + } 52 + 53 + impl<T: FromStr> Reader for Mutex<T> { 54 + fn read_from_slice(&self, reader: &mut UserSliceReader) -> Result { 55 + let mut buf = [0u8; 128]; 56 + if reader.len() > buf.len() { 57 + return Err(EINVAL); 58 + } 59 + let n = reader.len(); 60 + reader.read_slice(&mut buf[..n])?; 61 + 62 + let s = core::str::from_utf8(&buf[..n]).map_err(|_| EINVAL)?; 63 + let val = s.trim().parse::<T>().map_err(|_| EINVAL)?; 64 + *self.lock() = val; 65 + Ok(()) 66 + } 67 + } 68 + 69 + macro_rules! impl_reader_for_atomic { 70 + ($(($atomic_type:ty, $int_type:ty)),*) => { 71 + $( 72 + impl Reader for $atomic_type { 73 + fn read_from_slice(&self, reader: &mut UserSliceReader) -> Result { 74 + let mut buf = [0u8; 21]; // Enough for a 64-bit number. 75 + if reader.len() > buf.len() { 76 + return Err(EINVAL); 77 + } 78 + let n = reader.len(); 79 + reader.read_slice(&mut buf[..n])?; 80 + 81 + let s = core::str::from_utf8(&buf[..n]).map_err(|_| EINVAL)?; 82 + let val = s.trim().parse::<$int_type>().map_err(|_| EINVAL)?; 83 + self.store(val, Ordering::Relaxed); 84 + Ok(()) 85 + } 86 + } 87 + )* 88 + }; 89 + } 90 + 91 + impl_reader_for_atomic!( 92 + (AtomicI16, i16), 93 + (AtomicI32, i32), 94 + (AtomicI64, i64), 95 + (AtomicI8, i8), 96 + (AtomicIsize, isize), 97 + (AtomicU16, u16), 98 + (AtomicU32, u32), 99 + (AtomicU64, u64), 100 + (AtomicU8, u8), 101 + (AtomicUsize, usize) 102 + );
+4 -3
rust/kernel/device.rs
··· 6 6 7 7 use crate::{ 8 8 bindings, fmt, 9 - types::{ARef, ForeignOwnable, Opaque}, 9 + sync::aref::ARef, 10 + types::{ForeignOwnable, Opaque}, 10 11 }; 11 12 use core::{marker::PhantomData, ptr}; 12 13 ··· 407 406 kernel::impl_device_context_into_aref!(Device); 408 407 409 408 // SAFETY: Instances of `Device` are always reference-counted. 410 - unsafe impl crate::types::AlwaysRefCounted for Device { 409 + unsafe impl crate::sync::aref::AlwaysRefCounted for Device { 411 410 fn inc_ref(&self) { 412 411 // SAFETY: The existence of a shared reference guarantees that the refcount is non-zero. 413 412 unsafe { bindings::get_device(self.as_raw()) }; ··· 573 572 #[macro_export] 574 573 macro_rules! __impl_device_context_into_aref { 575 574 ($src:ty, $device:tt) => { 576 - impl ::core::convert::From<&$device<$src>> for $crate::types::ARef<$device> { 575 + impl ::core::convert::From<&$device<$src>> for $crate::sync::aref::ARef<$device> { 577 576 fn from(dev: &$device<$src>) -> Self { 578 577 (&**dev).into() 579 578 }
+2 -2
rust/kernel/devres.rs
··· 13 13 ffi::c_void, 14 14 prelude::*, 15 15 revocable::{Revocable, RevocableGuard}, 16 - sync::{rcu, Completion}, 17 - types::{ARef, ForeignOwnable, Opaque, ScopeGuard}, 16 + sync::{aref::ARef, rcu, Completion}, 17 + types::{ForeignOwnable, Opaque, ScopeGuard}, 18 18 }; 19 19 20 20 use pin_init::Wrapper;
+1
rust/kernel/io.rs
··· 8 8 use crate::{bindings, build_assert, ffi::c_void}; 9 9 10 10 pub mod mem; 11 + pub mod poll; 11 12 pub mod resource; 12 13 13 14 pub use resource::Resource;
+104
rust/kernel/io/poll.rs
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + //! IO polling. 4 + //! 5 + //! C header: [`include/linux/iopoll.h`](srctree/include/linux/iopoll.h). 6 + 7 + use crate::{ 8 + error::{code::*, Result}, 9 + processor::cpu_relax, 10 + task::might_sleep, 11 + time::{delay::fsleep, Delta, Instant, Monotonic}, 12 + }; 13 + 14 + /// Polls periodically until a condition is met, an error occurs, 15 + /// or the timeout is reached. 16 + /// 17 + /// The function repeatedly executes the given operation `op` closure and 18 + /// checks its result using the condition closure `cond`. 19 + /// 20 + /// If `cond` returns `true`, the function returns successfully with 21 + /// the result of `op`. Otherwise, it waits for a duration specified 22 + /// by `sleep_delta` before executing `op` again. 23 + /// 24 + /// This process continues until either `op` returns an error, `cond` 25 + /// returns `true`, or the timeout specified by `timeout_delta` is 26 + /// reached. 27 + /// 28 + /// This function can only be used in a nonatomic context. 29 + /// 30 + /// # Errors 31 + /// 32 + /// If `op` returns an error, then that error is returned directly. 33 + /// 34 + /// If the timeout specified by `timeout_delta` is reached, then 35 + /// `Err(ETIMEDOUT)` is returned. 36 + /// 37 + /// # Examples 38 + /// 39 + /// ```no_run 40 + /// use kernel::io::{Io, poll::read_poll_timeout}; 41 + /// use kernel::time::Delta; 42 + /// 43 + /// const HW_READY: u16 = 0x01; 44 + /// 45 + /// fn wait_for_hardware<const SIZE: usize>(io: &Io<SIZE>) -> Result<()> { 46 + /// match read_poll_timeout( 47 + /// // The `op` closure reads the value of a specific status register. 48 + /// || io.try_read16(0x1000), 49 + /// // The `cond` closure takes a reference to the value returned by `op` 50 + /// // and checks whether the hardware is ready. 51 + /// |val: &u16| *val == HW_READY, 52 + /// Delta::from_millis(50), 53 + /// Delta::from_secs(3), 54 + /// ) { 55 + /// Ok(_) => { 56 + /// // The hardware is ready. 
The returned value of the `op` closure 57 + /// // isn't used. 58 + /// Ok(()) 59 + /// } 60 + /// Err(e) => Err(e), 61 + /// } 62 + /// } 63 + /// ``` 64 + #[track_caller] 65 + pub fn read_poll_timeout<Op, Cond, T>( 66 + mut op: Op, 67 + mut cond: Cond, 68 + sleep_delta: Delta, 69 + timeout_delta: Delta, 70 + ) -> Result<T> 71 + where 72 + Op: FnMut() -> Result<T>, 73 + Cond: FnMut(&T) -> bool, 74 + { 75 + let start: Instant<Monotonic> = Instant::now(); 76 + 77 + // Unlike the C version, we always call `might_sleep()` unconditionally, 78 + // as conditional calls are error-prone. We clearly separate 79 + // `read_poll_timeout()` and `read_poll_timeout_atomic()` to aid 80 + // tools like klint. 81 + might_sleep(); 82 + 83 + loop { 84 + let val = op()?; 85 + if cond(&val) { 86 + // Unlike the C version, we immediately return. 87 + // We know the condition is met so we don't need to check again. 88 + return Ok(val); 89 + } 90 + 91 + if start.elapsed() > timeout_delta { 92 + // Unlike the C version, we immediately return. 93 + // We have just called `op()` so we don't need to call it again. 94 + return Err(ETIMEDOUT); 95 + } 96 + 97 + if !sleep_delta.is_zero() { 98 + fsleep(sleep_delta); 99 + } 100 + 101 + // `fsleep()` could be a busy-wait loop so we always call `cpu_relax()`. 102 + cpu_relax(); 103 + } 104 + }
+24
rust/kernel/irq.rs
// SPDX-License-Identifier: GPL-2.0

//! IRQ abstractions.
//!
//! An IRQ is an interrupt request from a device. It is used to get the CPU's
//! attention so it can service a hardware event in a timely manner.
//!
//! The current abstractions handle IRQ requests and handlers, i.e.: it allows
//! drivers to register a handler for a given IRQ line.
//!
//! C header: [`include/linux/interrupt.h`](srctree/include/linux/interrupt.h)

/// Flags to be used when registering IRQ handlers.
mod flags;

/// IRQ allocation and handling.
mod request;

pub use flags::Flags;

pub use request::{
    Handler, IrqRequest, IrqReturn, Registration, ThreadedHandler, ThreadedIrqReturn,
    ThreadedRegistration,
};
+124
rust/kernel/irq/flags.rs
// SPDX-License-Identifier: GPL-2.0
// SPDX-FileCopyrightText: Copyright 2025 Collabora ltd.

use crate::bindings;
use crate::prelude::*;

/// Flags to be used when registering IRQ handlers.
///
/// Flags can be used to request specific behaviors when registering an IRQ
/// handler, and can be combined using the `|`, `&`, and `!` operators to
/// further control the system's behavior.
///
/// A common use case is to register a shared interrupt, as sharing the line
/// between devices is increasingly common in modern systems and is even
/// required for some buses. This requires setting [`Flags::SHARED`] when
/// requesting the interrupt. Other use cases include setting the trigger type
/// through `Flags::TRIGGER_*`, which determines when the interrupt fires, or
/// controlling whether the interrupt is masked after the handler runs by using
/// [`Flags::ONESHOT`].
///
/// If an invalid combination of flags is provided, the system will refuse to
/// register the handler, and lower layers will enforce certain flags when
/// necessary. This means, for example, that all the
/// [`crate::irq::Registration`] for a shared interrupt have to agree on
/// [`Flags::SHARED`] and on the same trigger type, if set.
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct Flags(c_ulong);

impl Flags {
    /// Use the interrupt line as already configured.
    pub const TRIGGER_NONE: Flags = Flags::new(bindings::IRQF_TRIGGER_NONE);

    /// The interrupt is triggered when the signal goes from low to high.
    pub const TRIGGER_RISING: Flags = Flags::new(bindings::IRQF_TRIGGER_RISING);

    /// The interrupt is triggered when the signal goes from high to low.
    pub const TRIGGER_FALLING: Flags = Flags::new(bindings::IRQF_TRIGGER_FALLING);

    /// The interrupt is triggered while the signal is held high.
    pub const TRIGGER_HIGH: Flags = Flags::new(bindings::IRQF_TRIGGER_HIGH);

    /// The interrupt is triggered while the signal is held low.
    pub const TRIGGER_LOW: Flags = Flags::new(bindings::IRQF_TRIGGER_LOW);

    /// Allow sharing the IRQ among several devices.
    pub const SHARED: Flags = Flags::new(bindings::IRQF_SHARED);

    /// Set by callers when they expect sharing mismatches to occur.
    pub const PROBE_SHARED: Flags = Flags::new(bindings::IRQF_PROBE_SHARED);

    /// Flag to mark this interrupt as timer interrupt.
    pub const TIMER: Flags = Flags::new(bindings::IRQF_TIMER);

    /// Interrupt is per CPU.
    pub const PERCPU: Flags = Flags::new(bindings::IRQF_PERCPU);

    /// Flag to exclude this interrupt from irq balancing.
    pub const NOBALANCING: Flags = Flags::new(bindings::IRQF_NOBALANCING);

    /// Interrupt is used for polling (only the interrupt that is registered
    /// first in a shared interrupt is considered for performance reasons).
    pub const IRQPOLL: Flags = Flags::new(bindings::IRQF_IRQPOLL);

    /// Interrupt is not re-enabled after the hardirq handler finished. Used by
    /// threaded interrupts which need to keep the irq line disabled until the
    /// threaded handler has been run.
    pub const ONESHOT: Flags = Flags::new(bindings::IRQF_ONESHOT);

    /// Do not disable this IRQ during suspend. Does not guarantee that this
    /// interrupt will wake the system from a suspended state.
    pub const NO_SUSPEND: Flags = Flags::new(bindings::IRQF_NO_SUSPEND);

    /// Force enable it on resume even if [`Flags::NO_SUSPEND`] is set.
    pub const FORCE_RESUME: Flags = Flags::new(bindings::IRQF_FORCE_RESUME);

    /// Interrupt cannot be threaded.
    pub const NO_THREAD: Flags = Flags::new(bindings::IRQF_NO_THREAD);

    /// Resume IRQ early during syscore instead of at device resume time.
    pub const EARLY_RESUME: Flags = Flags::new(bindings::IRQF_EARLY_RESUME);

    /// If the IRQ is shared with a [`Flags::NO_SUSPEND`] user, execute this
    /// interrupt handler after suspending interrupts. For system wakeup devices
    /// users need to implement wakeup detection in their interrupt handlers.
    pub const COND_SUSPEND: Flags = Flags::new(bindings::IRQF_COND_SUSPEND);

    /// Don't enable IRQ or NMI automatically when users request it. Users will
    /// enable it explicitly by `enable_irq` or `enable_nmi` later.
    pub const NO_AUTOEN: Flags = Flags::new(bindings::IRQF_NO_AUTOEN);

    /// Exclude from runaway detection for IPI and similar handlers, depends on
    /// `PERCPU`.
    pub const NO_DEBUG: Flags = Flags::new(bindings::IRQF_NO_DEBUG);

    /// Returns the raw flag value expected by the C `request_irq()` family.
    pub(crate) fn into_inner(self) -> c_ulong {
        self.0
    }

    /// Builds a `Flags` from a raw `IRQF_*` constant, proving at compile time
    /// that the `u32` binding value fits into `c_ulong`.
    const fn new(value: u32) -> Self {
        build_assert!(value as u64 <= c_ulong::MAX as u64);
        Self(value as c_ulong)
    }
}

impl core::ops::BitOr for Flags {
    type Output = Self;
    fn bitor(self, rhs: Self) -> Self::Output {
        Self(self.0 | rhs.0)
    }
}

impl core::ops::BitAnd for Flags {
    type Output = Self;
    fn bitand(self, rhs: Self) -> Self::Output {
        Self(self.0 & rhs.0)
    }
}

impl core::ops::Not for Flags {
    type Output = Self;
    fn not(self) -> Self::Output {
        Self(!self.0)
    }
}
+507
rust/kernel/irq/request.rs
// SPDX-License-Identifier: GPL-2.0
// SPDX-FileCopyrightText: Copyright 2025 Collabora ltd.

//! This module provides types like [`Registration`] and
//! [`ThreadedRegistration`], which allow users to register handlers for a given
//! IRQ line.

use core::marker::PhantomPinned;

use crate::alloc::Allocator;
use crate::device::{Bound, Device};
use crate::devres::Devres;
use crate::error::to_result;
use crate::irq::flags::Flags;
use crate::prelude::*;
use crate::str::CStr;
use crate::sync::Arc;

/// The value that can be returned from a [`Handler`] or a [`ThreadedHandler`].
#[repr(u32)]
pub enum IrqReturn {
    /// The interrupt was not from this device or was not handled.
    None = bindings::irqreturn_IRQ_NONE,

    /// The interrupt was handled by this device.
    Handled = bindings::irqreturn_IRQ_HANDLED,
}

/// Callbacks for an IRQ handler.
pub trait Handler: Sync {
    /// The hard IRQ handler.
    ///
    /// This is executed in interrupt context, hence all corresponding
    /// limitations do apply.
    ///
    /// All work that does not necessarily need to be executed from
    /// interrupt context, should be deferred to a threaded handler.
    /// See also [`ThreadedRegistration`].
    fn handle(&self, device: &Device<Bound>) -> IrqReturn;
}

impl<T: ?Sized + Handler + Send> Handler for Arc<T> {
    fn handle(&self, device: &Device<Bound>) -> IrqReturn {
        T::handle(self, device)
    }
}

impl<T: ?Sized + Handler, A: Allocator> Handler for Box<T, A> {
    fn handle(&self, device: &Device<Bound>) -> IrqReturn {
        T::handle(self, device)
    }
}

/// # Invariants
///
/// - `self.irq` is the same as the one passed to `request_{threaded}_irq`.
/// - `cookie` was passed to `request_{threaded}_irq` as the cookie. It is guaranteed to be unique
///   by the type system, since each call to `new` will return a different instance of
///   `Registration`.
#[pin_data(PinnedDrop)]
struct RegistrationInner {
    irq: u32,
    cookie: *mut c_void,
}

impl RegistrationInner {
    fn synchronize(&self) {
        // SAFETY: safe as per the invariants of `RegistrationInner`
        unsafe { bindings::synchronize_irq(self.irq) };
    }
}

#[pinned_drop]
impl PinnedDrop for RegistrationInner {
    fn drop(self: Pin<&mut Self>) {
        // SAFETY:
        //
        // Safe as per the invariants of `RegistrationInner` and:
        //
        // - The containing struct is `!Unpin` and was initialized using
        //   pin-init, so it occupied the same memory location for the entirety of
        //   its lifetime.
        //
        // Notice that this will block until all handlers finish executing,
        // i.e.: at no point will &self be invalid while the handler is running.
        unsafe { bindings::free_irq(self.irq, self.cookie) };
    }
}

// SAFETY: We only use `inner` on drop, which is called at most once with no
// concurrent access.
unsafe impl Sync for RegistrationInner {}

// SAFETY: It is safe to send `RegistrationInner` across threads.
unsafe impl Send for RegistrationInner {}

/// A request for an IRQ line for a given device.
///
/// # Invariants
///
/// - `irq` is the number of an interrupt source of `dev`.
/// - `irq` has not been registered yet.
pub struct IrqRequest<'a> {
    dev: &'a Device<Bound>,
    irq: u32,
}

impl<'a> IrqRequest<'a> {
    /// Creates a new IRQ request for the given device and IRQ number.
    ///
    /// # Safety
    ///
    /// - `irq` should be a valid IRQ number for `dev`.
    pub(crate) unsafe fn new(dev: &'a Device<Bound>, irq: u32) -> Self {
        // INVARIANT: `irq` is a valid IRQ number for `dev`.
        IrqRequest { dev, irq }
    }

    /// Returns the IRQ number of an [`IrqRequest`].
    pub fn irq(&self) -> u32 {
        self.irq
    }
}

/// A registration of an IRQ handler for a given IRQ line.
///
/// # Examples
///
/// The following is an example of using `Registration`. It uses a
/// [`Completion`] to coordinate between the IRQ
/// handler and process context. [`Completion`] uses interior mutability, so the
/// handler can signal with [`Completion::complete_all()`] and the process
/// context can wait with [`Completion::wait_for_completion()`] even though
/// there is no way to get a mutable reference to any of the fields in
/// `Data`.
///
/// [`Completion`]: kernel::sync::Completion
/// [`Completion::complete_all()`]: kernel::sync::Completion::complete_all
/// [`Completion::wait_for_completion()`]: kernel::sync::Completion::wait_for_completion
///
/// ```
/// use kernel::c_str;
/// use kernel::device::{Bound, Device};
/// use kernel::irq::{self, Flags, IrqRequest, IrqReturn, Registration};
/// use kernel::prelude::*;
/// use kernel::sync::{Arc, Completion};
///
/// // Data shared between process and IRQ context.
/// #[pin_data]
/// struct Data {
///     #[pin]
///     completion: Completion,
/// }
///
/// impl irq::Handler for Data {
///     // Executed in IRQ context.
///     fn handle(&self, _dev: &Device<Bound>) -> IrqReturn {
///         self.completion.complete_all();
///         IrqReturn::Handled
///     }
/// }
///
/// // Registers an IRQ handler for the given IrqRequest.
/// //
/// // This runs in process context and assumes `request` was previously acquired from a device.
/// fn register_irq(
///     handler: impl PinInit<Data, Error>,
///     request: IrqRequest<'_>,
/// ) -> Result<Arc<Registration<Data>>> {
///     let registration = Registration::new(request, Flags::SHARED, c_str!("my_device"), handler);
///
///     let registration = Arc::pin_init(registration, GFP_KERNEL)?;
///
///     registration.handler().completion.wait_for_completion();
///
///     Ok(registration)
/// }
/// # Ok::<(), Error>(())
/// ```
///
/// # Invariants
///
/// * We own an irq handler whose cookie is a pointer to `Self`.
#[pin_data]
pub struct Registration<T: Handler + 'static> {
    #[pin]
    inner: Devres<RegistrationInner>,

    #[pin]
    handler: T,

    /// Pinned because we need address stability so that we can pass a pointer
    /// to the callback.
    #[pin]
    _pin: PhantomPinned,
}

impl<T: Handler + 'static> Registration<T> {
    /// Registers the IRQ handler with the system for the given IRQ number.
    pub fn new<'a>(
        request: IrqRequest<'a>,
        flags: Flags,
        name: &'static CStr,
        handler: impl PinInit<T, Error> + 'a,
    ) -> impl PinInit<Self, Error> + 'a {
        try_pin_init!(&this in Self {
            handler <- handler,
            inner <- Devres::new(
                request.dev,
                try_pin_init!(RegistrationInner {
                    // INVARIANT: `this` is a valid pointer to the `Registration` instance
                    cookie: this.as_ptr().cast::<c_void>(),
                    irq: {
                        // SAFETY:
                        // - The callbacks are valid for use with request_irq.
                        // - If this succeeds, the slot is guaranteed to be valid until the
                        //   destructor of Self runs, which will deregister the callbacks
                        //   before the memory location becomes invalid.
                        // - When request_irq is called, everything that handle_irq_callback will
                        //   touch has already been initialized, so it's safe for the callback to
                        //   be called immediately.
                        to_result(unsafe {
                            bindings::request_irq(
                                request.irq,
                                Some(handle_irq_callback::<T>),
                                flags.into_inner(),
                                name.as_char_ptr(),
                                this.as_ptr().cast::<c_void>(),
                            )
                        })?;
                        request.irq
                    }
                })
            ),
            _pin: PhantomPinned,
        })
    }

    /// Returns a reference to the handler that was registered with the system.
    pub fn handler(&self) -> &T {
        &self.handler
    }

    /// Wait for pending IRQ handlers on other CPUs.
    ///
    /// This will attempt to access the inner [`Devres`] container.
    pub fn try_synchronize(&self) -> Result {
        let inner = self.inner.try_access().ok_or(ENODEV)?;
        inner.synchronize();
        Ok(())
    }

    /// Wait for pending IRQ handlers on other CPUs.
    pub fn synchronize(&self, dev: &Device<Bound>) -> Result {
        let inner = self.inner.access(dev)?;
        inner.synchronize();
        Ok(())
    }
}

/// # Safety
///
/// This function should be only used as the callback in `request_irq`.
unsafe extern "C" fn handle_irq_callback<T: Handler>(_irq: i32, ptr: *mut c_void) -> c_uint {
    // SAFETY: `ptr` is a pointer to `Registration<T>` set in `Registration::new`
    let registration = unsafe { &*(ptr as *const Registration<T>) };
    // SAFETY: The irq callback is removed before the device is unbound, so the fact that the irq
    // callback is running implies that the device has not yet been unbound.
    let device = unsafe { registration.inner.device().as_bound() };

    T::handle(&registration.handler, device) as c_uint
}

/// The value that can be returned from [`ThreadedHandler::handle`].
#[repr(u32)]
pub enum ThreadedIrqReturn {
    /// The interrupt was not from this device or was not handled.
    None = bindings::irqreturn_IRQ_NONE,

    /// The interrupt was handled by this device.
    Handled = bindings::irqreturn_IRQ_HANDLED,

    /// The handler wants the handler thread to wake up.
    WakeThread = bindings::irqreturn_IRQ_WAKE_THREAD,
}

/// Callbacks for a threaded IRQ handler.
pub trait ThreadedHandler: Sync {
    /// The hard IRQ handler.
    ///
    /// This is executed in interrupt context, hence all corresponding
    /// limitations do apply. All work that does not necessarily need to be
    /// executed from interrupt context, should be deferred to the threaded
    /// handler, i.e. [`ThreadedHandler::handle_threaded`].
    ///
    /// The default implementation returns [`ThreadedIrqReturn::WakeThread`].
    #[expect(unused_variables)]
    fn handle(&self, device: &Device<Bound>) -> ThreadedIrqReturn {
        ThreadedIrqReturn::WakeThread
    }

    /// The threaded IRQ handler.
    ///
    /// This is executed in process context. The kernel creates a dedicated
    /// `kthread` for this purpose.
    fn handle_threaded(&self, device: &Device<Bound>) -> IrqReturn;
}

impl<T: ?Sized + ThreadedHandler + Send> ThreadedHandler for Arc<T> {
    fn handle(&self, device: &Device<Bound>) -> ThreadedIrqReturn {
        T::handle(self, device)
    }

    fn handle_threaded(&self, device: &Device<Bound>) -> IrqReturn {
        T::handle_threaded(self, device)
    }
}

impl<T: ?Sized + ThreadedHandler, A: Allocator> ThreadedHandler for Box<T, A> {
    fn handle(&self, device: &Device<Bound>) -> ThreadedIrqReturn {
        T::handle(self, device)
    }

    fn handle_threaded(&self, device: &Device<Bound>) -> IrqReturn {
        T::handle_threaded(self, device)
    }
}

/// A registration of a threaded IRQ handler for a given IRQ line.
///
/// Two callbacks are required: one to handle the IRQ, and one to handle any
/// other work in a separate thread.
///
/// The thread handler is only called if the IRQ handler returns
/// [`ThreadedIrqReturn::WakeThread`].
///
/// # Examples
///
/// The following is an example of using [`ThreadedRegistration`]. It uses a
/// [`Mutex`](kernel::sync::Mutex) to provide interior mutability.
///
/// ```
/// use kernel::c_str;
/// use kernel::device::{Bound, Device};
/// use kernel::irq::{
///     self, Flags, IrqRequest, IrqReturn, ThreadedHandler, ThreadedIrqReturn,
///     ThreadedRegistration,
/// };
/// use kernel::prelude::*;
/// use kernel::sync::{Arc, Mutex};
///
/// // Declare a struct that will be passed in when the interrupt fires. The u32
/// // merely serves as an example of some internal data.
/// //
/// // [`irq::ThreadedHandler::handle`] takes `&self`. This example
/// // illustrates how interior mutability can be used when sharing the data
/// // between process context and IRQ context.
/// #[pin_data]
/// struct Data {
///     #[pin]
///     value: Mutex<u32>,
/// }
///
/// impl ThreadedHandler for Data {
///     // This will run (in a separate kthread) if and only if
///     // [`ThreadedHandler::handle`] returns [`WakeThread`], which it does by
///     // default.
///     fn handle_threaded(&self, _dev: &Device<Bound>) -> IrqReturn {
///         let mut data = self.value.lock();
///         *data += 1;
///         IrqReturn::Handled
///     }
/// }
///
/// // Registers a threaded IRQ handler for the given [`IrqRequest`].
/// //
/// // This is executing in process context and assumes that `request` was
/// // previously acquired from a device.
/// fn register_threaded_irq(
///     handler: impl PinInit<Data, Error>,
///     request: IrqRequest<'_>,
/// ) -> Result<Arc<ThreadedRegistration<Data>>> {
///     let registration =
///         ThreadedRegistration::new(request, Flags::SHARED, c_str!("my_device"), handler);
///
///     let registration = Arc::pin_init(registration, GFP_KERNEL)?;
///
///     {
///         // The data can be accessed from process context too.
///         let mut data = registration.handler().value.lock();
///         *data += 1;
///     }
///
///     Ok(registration)
/// }
/// # Ok::<(), Error>(())
/// ```
///
/// # Invariants
///
/// * We own an irq handler whose cookie is a pointer to `Self`.
#[pin_data]
pub struct ThreadedRegistration<T: ThreadedHandler + 'static> {
    #[pin]
    inner: Devres<RegistrationInner>,

    #[pin]
    handler: T,

    /// Pinned because we need address stability so that we can pass a pointer
    /// to the callback.
    #[pin]
    _pin: PhantomPinned,
}

impl<T: ThreadedHandler + 'static> ThreadedRegistration<T> {
    /// Registers the IRQ handler with the system for the given IRQ number.
    pub fn new<'a>(
        request: IrqRequest<'a>,
        flags: Flags,
        name: &'static CStr,
        handler: impl PinInit<T, Error> + 'a,
    ) -> impl PinInit<Self, Error> + 'a {
        try_pin_init!(&this in Self {
            handler <- handler,
            inner <- Devres::new(
                request.dev,
                try_pin_init!(RegistrationInner {
                    // INVARIANT: `this` is a valid pointer to the `ThreadedRegistration` instance.
                    cookie: this.as_ptr().cast::<c_void>(),
                    irq: {
                        // SAFETY:
                        // - The callbacks are valid for use with request_threaded_irq.
                        // - If this succeeds, the slot is guaranteed to be valid until the
                        //   destructor of Self runs, which will deregister the callbacks
                        //   before the memory location becomes invalid.
                        // - When request_threaded_irq is called, everything that the two callbacks
                        //   will touch has already been initialized, so it's safe for the
                        //   callbacks to be called immediately.
                        to_result(unsafe {
                            bindings::request_threaded_irq(
                                request.irq,
                                Some(handle_threaded_irq_callback::<T>),
                                Some(thread_fn_callback::<T>),
                                flags.into_inner(),
                                name.as_char_ptr(),
                                this.as_ptr().cast::<c_void>(),
                            )
                        })?;
                        request.irq
                    }
                })
            ),
            _pin: PhantomPinned,
        })
    }

    /// Returns a reference to the handler that was registered with the system.
    pub fn handler(&self) -> &T {
        &self.handler
    }

    /// Wait for pending IRQ handlers on other CPUs.
    ///
    /// This will attempt to access the inner [`Devres`] container.
    pub fn try_synchronize(&self) -> Result {
        let inner = self.inner.try_access().ok_or(ENODEV)?;
        inner.synchronize();
        Ok(())
    }

    /// Wait for pending IRQ handlers on other CPUs.
    pub fn synchronize(&self, dev: &Device<Bound>) -> Result {
        let inner = self.inner.access(dev)?;
        inner.synchronize();
        Ok(())
    }
}

/// # Safety
///
/// This function should be only used as the callback in `request_threaded_irq`.
unsafe extern "C" fn handle_threaded_irq_callback<T: ThreadedHandler>(
    _irq: i32,
    ptr: *mut c_void,
) -> c_uint {
    // SAFETY: `ptr` is a pointer to `ThreadedRegistration<T>` set in `ThreadedRegistration::new`
    let registration = unsafe { &*(ptr as *const ThreadedRegistration<T>) };
    // SAFETY: The irq callback is removed before the device is unbound, so the fact that the irq
    // callback is running implies that the device has not yet been unbound.
    let device = unsafe { registration.inner.device().as_bound() };

    T::handle(&registration.handler, device) as c_uint
}

/// # Safety
///
/// This function should be only used as the callback in `request_threaded_irq`.
unsafe extern "C" fn thread_fn_callback<T: ThreadedHandler>(_irq: i32, ptr: *mut c_void) -> c_uint {
    // SAFETY: `ptr` is a pointer to `ThreadedRegistration<T>` set in `ThreadedRegistration::new`
    let registration = unsafe { &*(ptr as *const ThreadedRegistration<T>) };
    // SAFETY: The irq callback is removed before the device is unbound, so the fact that the irq
    // callback is running implies that the device has not yet been unbound.
    let device = unsafe { registration.inner.device().as_bound() };

    T::handle_threaded(&registration.handler, device) as c_uint
}
+3
rust/kernel/lib.rs
··· 78 78 pub mod cpufreq; 79 79 pub mod cpumask; 80 80 pub mod cred; 81 + pub mod debugfs; 81 82 pub mod device; 82 83 pub mod device_id; 83 84 pub mod devres; ··· 95 94 pub mod init; 96 95 pub mod io; 97 96 pub mod ioctl; 97 + pub mod irq; 98 98 pub mod jump_label; 99 99 #[cfg(CONFIG_KUNIT)] 100 100 pub mod kunit; ··· 114 112 pub mod platform; 115 113 pub mod prelude; 116 114 pub mod print; 115 + pub mod processor; 117 116 pub mod ptr; 118 117 pub mod rbtree; 119 118 pub mod regulator;
+163 -17
rust/kernel/pci.rs
··· 10 10 devres::Devres, 11 11 driver, 12 12 error::{from_result, to_result, Result}, 13 - io::Io, 14 - io::IoRaw, 13 + io::{Io, IoRaw}, 14 + irq::{self, IrqRequest}, 15 15 str::CStr, 16 - types::{ARef, Opaque}, 16 + sync::aref::ARef, 17 + types::Opaque, 17 18 ThisModule, 18 19 }; 19 20 use core::{ ··· 23 22 ptr::{addr_of_mut, NonNull}, 24 23 }; 25 24 use kernel::prelude::*; 25 + 26 + mod id; 27 + 28 + pub use self::id::{Class, ClassMask, Vendor}; 26 29 27 30 /// An adapter for the registration of PCI drivers. 28 31 pub struct Adapter<T: Driver>(T); ··· 65 60 extern "C" fn probe_callback( 66 61 pdev: *mut bindings::pci_dev, 67 62 id: *const bindings::pci_device_id, 68 - ) -> kernel::ffi::c_int { 63 + ) -> c_int { 69 64 // SAFETY: The PCI bus only ever calls the probe callback with a valid pointer to a 70 65 // `struct pci_dev`. 71 66 // ··· 133 128 134 129 /// Equivalent to C's `PCI_DEVICE` macro. 135 130 /// 136 - /// Create a new `pci::DeviceId` from a vendor and device ID number. 137 - pub const fn from_id(vendor: u32, device: u32) -> Self { 131 + /// Create a new `pci::DeviceId` from a vendor and device ID. 132 + #[inline] 133 + pub const fn from_id(vendor: Vendor, device: u32) -> Self { 138 134 Self(bindings::pci_device_id { 139 - vendor, 135 + vendor: vendor.as_raw() as u32, 140 136 device, 141 137 subvendor: DeviceId::PCI_ANY_ID, 142 138 subdevice: DeviceId::PCI_ANY_ID, ··· 151 145 /// Equivalent to C's `PCI_DEVICE_CLASS` macro. 152 146 /// 153 147 /// Create a new `pci::DeviceId` from a class number and mask. 148 + #[inline] 154 149 pub const fn from_class(class: u32, class_mask: u32) -> Self { 155 150 Self(bindings::pci_device_id { 156 151 vendor: DeviceId::PCI_ANY_ID, ··· 160 153 subdevice: DeviceId::PCI_ANY_ID, 161 154 class, 162 155 class_mask, 156 + driver_data: 0, 157 + override_only: 0, 158 + }) 159 + } 160 + 161 + /// Create a new [`DeviceId`] from a class number, mask, and specific vendor. 
162 + /// 163 + /// This is more targeted than [`DeviceId::from_class`]: in addition to matching by [`Vendor`], 164 + /// it also matches the PCI [`Class`] (up to the entire 24 bits, depending on the 165 + /// [`ClassMask`]). 166 + #[inline] 167 + pub const fn from_class_and_vendor( 168 + class: Class, 169 + class_mask: ClassMask, 170 + vendor: Vendor, 171 + ) -> Self { 172 + Self(bindings::pci_device_id { 173 + vendor: vendor.as_raw() as u32, 174 + device: DeviceId::PCI_ANY_ID, 175 + subvendor: DeviceId::PCI_ANY_ID, 176 + subdevice: DeviceId::PCI_ANY_ID, 177 + class: class.as_raw(), 178 + class_mask: class_mask.as_raw(), 163 179 driver_data: 0, 164 180 override_only: 0, 165 181 }) ··· 236 206 /// <MyDriver as pci::Driver>::IdInfo, 237 207 /// [ 238 208 /// ( 239 - /// pci::DeviceId::from_id(bindings::PCI_VENDOR_ID_REDHAT, bindings::PCI_ANY_ID as u32), 209 + /// pci::DeviceId::from_id(pci::Vendor::REDHAT, bindings::PCI_ANY_ID as u32), 240 210 /// (), 241 211 /// ) 242 212 /// ] ··· 270 240 271 241 /// PCI driver probe. 272 242 /// 273 - /// Called when a new platform device is added or discovered. 274 - /// Implementers should attempt to initialize the device here. 243 + /// Called when a new pci device is added or discovered. Implementers should 244 + /// attempt to initialize the device here. 275 245 fn probe(dev: &Device<device::Core>, id_info: &Self::IdInfo) -> Result<Pin<KBox<Self>>>; 276 246 277 - /// Platform driver unbind. 247 + /// PCI driver unbind. 278 248 /// 279 249 /// Called when a [`Device`] is unbound from its bound [`Driver`]. Implementing this callback 280 250 /// is optional. ··· 377 347 // `ioptr` is valid by the safety requirements. 378 348 // `num` is valid by the safety requirements. 
379 349 unsafe { 380 - bindings::pci_iounmap(pdev.as_raw(), ioptr as *mut kernel::ffi::c_void); 350 + bindings::pci_iounmap(pdev.as_raw(), ioptr as *mut c_void); 381 351 bindings::pci_release_region(pdev.as_raw(), num); 382 352 } 383 353 } ··· 389 359 } 390 360 391 361 impl Bar { 362 + #[inline] 392 363 fn index_is_valid(index: u32) -> bool { 393 364 // A `struct pci_dev` owns an array of resources with at most `PCI_NUM_RESOURCES` entries. 394 365 index < bindings::PCI_NUM_RESOURCES ··· 412 381 } 413 382 414 383 impl<Ctx: device::DeviceContext> Device<Ctx> { 384 + #[inline] 415 385 fn as_raw(&self) -> *mut bindings::pci_dev { 416 386 self.0.get() 417 387 } 418 388 } 419 389 420 390 impl Device { 421 - /// Returns the PCI vendor ID. 422 - pub fn vendor_id(&self) -> u16 { 391 + /// Returns the PCI vendor ID as [`Vendor`]. 392 + /// 393 + /// # Examples 394 + /// 395 + /// ``` 396 + /// # use kernel::{device::Core, pci::{self, Vendor}, prelude::*}; 397 + /// fn log_device_info(pdev: &pci::Device<Core>) -> Result { 398 + /// // Get an instance of `Vendor`. 399 + /// let vendor = pdev.vendor_id(); 400 + /// dev_info!( 401 + /// pdev.as_ref(), 402 + /// "Device: Vendor={}, Device=0x{:x}\n", 403 + /// vendor, 404 + /// pdev.device_id() 405 + /// ); 406 + /// Ok(()) 407 + /// } 408 + /// ``` 409 + #[inline] 410 + pub fn vendor_id(&self) -> Vendor { 423 411 // SAFETY: `self.as_raw` is a valid pointer to a `struct pci_dev`. 424 - unsafe { (*self.as_raw()).vendor } 412 + let vendor_id = unsafe { (*self.as_raw()).vendor }; 413 + Vendor::from_raw(vendor_id) 425 414 } 426 415 427 416 /// Returns the PCI device ID. 417 + #[inline] 428 418 pub fn device_id(&self) -> u16 { 429 - // SAFETY: `self.as_raw` is a valid pointer to a `struct pci_dev`. 419 + // SAFETY: By its type invariant `self.as_raw` is always a valid pointer to a 420 + // `struct pci_dev`. 430 421 unsafe { (*self.as_raw()).device } 422 + } 423 + 424 + /// Returns the PCI revision ID. 
425 + #[inline] 426 + pub fn revision_id(&self) -> u8 { 427 + // SAFETY: By its type invariant `self.as_raw` is always a valid pointer to a 428 + // `struct pci_dev`. 429 + unsafe { (*self.as_raw()).revision } 430 + } 431 + 432 + /// Returns the PCI bus device/function. 433 + #[inline] 434 + pub fn dev_id(&self) -> u16 { 435 + // SAFETY: By its type invariant `self.as_raw` is always a valid pointer to a 436 + // `struct pci_dev`. 437 + unsafe { bindings::pci_dev_id(self.as_raw()) } 438 + } 439 + 440 + /// Returns the PCI subsystem vendor ID. 441 + #[inline] 442 + pub fn subsystem_vendor_id(&self) -> u16 { 443 + // SAFETY: By its type invariant `self.as_raw` is always a valid pointer to a 444 + // `struct pci_dev`. 445 + unsafe { (*self.as_raw()).subsystem_vendor } 446 + } 447 + 448 + /// Returns the PCI subsystem device ID. 449 + #[inline] 450 + pub fn subsystem_device_id(&self) -> u16 { 451 + // SAFETY: By its type invariant `self.as_raw` is always a valid pointer to a 452 + // `struct pci_dev`. 453 + unsafe { (*self.as_raw()).subsystem_device } 454 + } 455 + 456 + /// Returns the start of the given PCI bar resource. 457 + pub fn resource_start(&self, bar: u32) -> Result<bindings::resource_size_t> { 458 + if !Bar::index_is_valid(bar) { 459 + return Err(EINVAL); 460 + } 461 + 462 + // SAFETY: 463 + // - `bar` is a valid bar number, as guaranteed by the above call to `Bar::index_is_valid`, 464 + // - by its type invariant `self.as_raw` is always a valid pointer to a `struct pci_dev`. 465 + Ok(unsafe { bindings::pci_resource_start(self.as_raw(), bar.try_into()?) }) 431 466 } 432 467 433 468 /// Returns the size of the given PCI bar resource. ··· 506 409 // - `bar` is a valid bar number, as guaranteed by the above call to `Bar::index_is_valid`, 507 410 // - by its type invariant `self.as_raw` is always a valid pointer to a `struct pci_dev`. 508 411 Ok(unsafe { bindings::pci_resource_len(self.as_raw(), bar.try_into()?) 
}) 412 + } 413 + 414 + /// Returns the PCI class as a `Class` struct. 415 + #[inline] 416 + pub fn pci_class(&self) -> Class { 417 + // SAFETY: `self.as_raw` is a valid pointer to a `struct pci_dev`. 418 + Class::from_raw(unsafe { (*self.as_raw()).class }) 509 419 } 510 420 } 511 421 ··· 535 431 ) -> impl PinInit<Devres<Bar>, Error> + 'a { 536 432 self.iomap_region_sized::<0>(bar, name) 537 433 } 434 + 435 + /// Returns an [`IrqRequest`] for the IRQ vector at the given index, if any. 436 + pub fn irq_vector(&self, index: u32) -> Result<IrqRequest<'_>> { 437 + // SAFETY: `self.as_raw` returns a valid pointer to a `struct pci_dev`. 438 + let irq = unsafe { crate::bindings::pci_irq_vector(self.as_raw(), index) }; 439 + if irq < 0 { 440 + return Err(crate::error::Error::from_errno(irq)); 441 + } 442 + // SAFETY: `irq` is guaranteed to be a valid IRQ number for `&self`. 443 + Ok(unsafe { IrqRequest::new(self.as_ref(), irq as u32) }) 444 + } 445 + 446 + /// Returns a [`kernel::irq::Registration`] for the IRQ vector at the given 447 + /// index. 448 + pub fn request_irq<'a, T: crate::irq::Handler + 'static>( 449 + &'a self, 450 + index: u32, 451 + flags: irq::Flags, 452 + name: &'static CStr, 453 + handler: impl PinInit<T, Error> + 'a, 454 + ) -> Result<impl PinInit<irq::Registration<T>, Error> + 'a> { 455 + let request = self.irq_vector(index)?; 456 + 457 + Ok(irq::Registration::<T>::new(request, flags, name, handler)) 458 + } 459 + 460 + /// Returns a [`kernel::irq::ThreadedRegistration`] for the IRQ vector at 461 + /// the given index. 
462 + pub fn request_threaded_irq<'a, T: crate::irq::ThreadedHandler + 'static>( 463 + &'a self, 464 + index: u32, 465 + flags: irq::Flags, 466 + name: &'static CStr, 467 + handler: impl PinInit<T, Error> + 'a, 468 + ) -> Result<impl PinInit<irq::ThreadedRegistration<T>, Error> + 'a> { 469 + let request = self.irq_vector(index)?; 470 + 471 + Ok(irq::ThreadedRegistration::<T>::new( 472 + request, flags, name, handler, 473 + )) 474 + } 538 475 } 539 476 540 477 impl Device<device::Core> { ··· 586 441 } 587 442 588 443 /// Enable bus-mastering for this device. 444 + #[inline] 589 445 pub fn set_master(&self) { 590 446 // SAFETY: `self.as_raw` is guaranteed to be a pointer to a valid `struct pci_dev`. 591 447 unsafe { bindings::pci_set_master(self.as_raw()) }; ··· 601 455 impl crate::dma::Device for Device<device::Core> {} 602 456 603 457 // SAFETY: Instances of `Device` are always reference-counted. 604 - unsafe impl crate::types::AlwaysRefCounted for Device { 458 + unsafe impl crate::sync::aref::AlwaysRefCounted for Device { 605 459 fn inc_ref(&self) { 606 460 // SAFETY: The existence of a shared reference guarantees that the refcount is non-zero. 607 461 unsafe { bindings::pci_dev_get(self.as_raw()) };
+578
rust/kernel/pci/id.rs
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + //! PCI device identifiers and related types. 4 + //! 5 + //! This module contains PCI class codes, Vendor IDs, and supporting types. 6 + 7 + use crate::{bindings, error::code::EINVAL, error::Error, prelude::*}; 8 + use core::fmt; 9 + 10 + /// PCI device class codes. 11 + /// 12 + /// Each entry contains the full 24-bit PCI class code (base class in bits 13 + /// 23-16, subclass in bits 15-8, programming interface in bits 7-0). 14 + /// 15 + /// # Examples 16 + /// 17 + /// ``` 18 + /// # use kernel::{device::Core, pci::{self, Class}, prelude::*}; 19 + /// fn probe_device(pdev: &pci::Device<Core>) -> Result { 20 + /// let pci_class = pdev.pci_class(); 21 + /// dev_info!( 22 + /// pdev.as_ref(), 23 + /// "Detected PCI class: {}\n", 24 + /// pci_class 25 + /// ); 26 + /// Ok(()) 27 + /// } 28 + /// ``` 29 + #[derive(Clone, Copy, PartialEq, Eq)] 30 + #[repr(transparent)] 31 + pub struct Class(u32); 32 + 33 + /// PCI class mask constants for matching [`Class`] codes. 34 + #[repr(u32)] 35 + #[derive(Debug, Clone, Copy, PartialEq, Eq)] 36 + pub enum ClassMask { 37 + /// Match the full 24-bit class code. 38 + Full = 0xffffff, 39 + /// Match the upper 16 bits of the class code (base class and subclass only) 40 + ClassSubclass = 0xffff00, 41 + } 42 + 43 + macro_rules! define_all_pci_classes { 44 + ( 45 + $($variant:ident = $binding:expr,)+ 46 + ) => { 47 + impl Class { 48 + $( 49 + #[allow(missing_docs)] 50 + pub const $variant: Self = Self(Self::to_24bit_class($binding)); 51 + )+ 52 + } 53 + 54 + impl fmt::Display for Class { 55 + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 56 + match self { 57 + $( 58 + &Self::$variant => write!(f, stringify!($variant)), 59 + )+ 60 + _ => <Self as fmt::Debug>::fmt(self, f), 61 + } 62 + } 63 + } 64 + }; 65 + } 66 + 67 + /// Once constructed, a [`Class`] contains a valid PCI class code. 68 + impl Class { 69 + /// Create a [`Class`] from a raw 24-bit class code. 
70 + #[inline] 71 + pub(super) fn from_raw(class_code: u32) -> Self { 72 + Self(class_code) 73 + } 74 + 75 + /// Get the raw 24-bit class code value. 76 + #[inline] 77 + pub const fn as_raw(self) -> u32 { 78 + self.0 79 + } 80 + 81 + // Converts a PCI class constant to 24-bit format. 82 + // 83 + // Many device drivers use only the upper 16 bits (base class and subclass), 84 + // but some use the full 24 bits. In order to support both cases, store the 85 + // class code as a 24-bit value, where 16-bit values are shifted up 8 bits. 86 + const fn to_24bit_class(val: u32) -> u32 { 87 + if val > 0xFFFF { 88 + val 89 + } else { 90 + val << 8 91 + } 92 + } 93 + } 94 + 95 + impl fmt::Debug for Class { 96 + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 97 + write!(f, "0x{:06x}", self.0) 98 + } 99 + } 100 + 101 + impl ClassMask { 102 + /// Get the raw mask value. 103 + #[inline] 104 + pub const fn as_raw(self) -> u32 { 105 + self as u32 106 + } 107 + } 108 + 109 + impl TryFrom<u32> for ClassMask { 110 + type Error = Error; 111 + 112 + fn try_from(value: u32) -> Result<Self, Self::Error> { 113 + match value { 114 + 0xffffff => Ok(ClassMask::Full), 115 + 0xffff00 => Ok(ClassMask::ClassSubclass), 116 + _ => Err(EINVAL), 117 + } 118 + } 119 + } 120 + 121 + /// PCI vendor IDs. 122 + /// 123 + /// Each entry contains the 16-bit PCI vendor ID as assigned by the PCI SIG. 124 + #[derive(Clone, Copy, PartialEq, Eq)] 125 + #[repr(transparent)] 126 + pub struct Vendor(u16); 127 + 128 + macro_rules! 
define_all_pci_vendors { 129 + ( 130 + $($variant:ident = $binding:expr,)+ 131 + ) => { 132 + impl Vendor { 133 + $( 134 + #[allow(missing_docs)] 135 + pub const $variant: Self = Self($binding as u16); 136 + )+ 137 + } 138 + 139 + impl fmt::Display for Vendor { 140 + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 141 + match self { 142 + $( 143 + &Self::$variant => write!(f, stringify!($variant)), 144 + )+ 145 + _ => <Self as fmt::Debug>::fmt(self, f), 146 + } 147 + } 148 + } 149 + }; 150 + } 151 + 152 + /// Once constructed, a `Vendor` contains a valid PCI Vendor ID. 153 + impl Vendor { 154 + /// Create a Vendor from a raw 16-bit vendor ID. 155 + #[inline] 156 + pub(super) fn from_raw(vendor_id: u16) -> Self { 157 + Self(vendor_id) 158 + } 159 + 160 + /// Get the raw 16-bit vendor ID value. 161 + #[inline] 162 + pub const fn as_raw(self) -> u16 { 163 + self.0 164 + } 165 + } 166 + 167 + impl fmt::Debug for Vendor { 168 + #[inline] 169 + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { 170 + write!(f, "0x{:04x}", self.0) 171 + } 172 + } 173 + 174 + define_all_pci_classes! 
{ 175 + NOT_DEFINED = bindings::PCI_CLASS_NOT_DEFINED, // 0x000000 176 + NOT_DEFINED_VGA = bindings::PCI_CLASS_NOT_DEFINED_VGA, // 0x000100 177 + 178 + STORAGE_SCSI = bindings::PCI_CLASS_STORAGE_SCSI, // 0x010000 179 + STORAGE_IDE = bindings::PCI_CLASS_STORAGE_IDE, // 0x010100 180 + STORAGE_FLOPPY = bindings::PCI_CLASS_STORAGE_FLOPPY, // 0x010200 181 + STORAGE_IPI = bindings::PCI_CLASS_STORAGE_IPI, // 0x010300 182 + STORAGE_RAID = bindings::PCI_CLASS_STORAGE_RAID, // 0x010400 183 + STORAGE_SATA = bindings::PCI_CLASS_STORAGE_SATA, // 0x010600 184 + STORAGE_SATA_AHCI = bindings::PCI_CLASS_STORAGE_SATA_AHCI, // 0x010601 185 + STORAGE_SAS = bindings::PCI_CLASS_STORAGE_SAS, // 0x010700 186 + STORAGE_EXPRESS = bindings::PCI_CLASS_STORAGE_EXPRESS, // 0x010802 187 + STORAGE_OTHER = bindings::PCI_CLASS_STORAGE_OTHER, // 0x018000 188 + 189 + NETWORK_ETHERNET = bindings::PCI_CLASS_NETWORK_ETHERNET, // 0x020000 190 + NETWORK_TOKEN_RING = bindings::PCI_CLASS_NETWORK_TOKEN_RING, // 0x020100 191 + NETWORK_FDDI = bindings::PCI_CLASS_NETWORK_FDDI, // 0x020200 192 + NETWORK_ATM = bindings::PCI_CLASS_NETWORK_ATM, // 0x020300 193 + NETWORK_OTHER = bindings::PCI_CLASS_NETWORK_OTHER, // 0x028000 194 + 195 + DISPLAY_VGA = bindings::PCI_CLASS_DISPLAY_VGA, // 0x030000 196 + DISPLAY_XGA = bindings::PCI_CLASS_DISPLAY_XGA, // 0x030100 197 + DISPLAY_3D = bindings::PCI_CLASS_DISPLAY_3D, // 0x030200 198 + DISPLAY_OTHER = bindings::PCI_CLASS_DISPLAY_OTHER, // 0x038000 199 + 200 + MULTIMEDIA_VIDEO = bindings::PCI_CLASS_MULTIMEDIA_VIDEO, // 0x040000 201 + MULTIMEDIA_AUDIO = bindings::PCI_CLASS_MULTIMEDIA_AUDIO, // 0x040100 202 + MULTIMEDIA_PHONE = bindings::PCI_CLASS_MULTIMEDIA_PHONE, // 0x040200 203 + MULTIMEDIA_HD_AUDIO = bindings::PCI_CLASS_MULTIMEDIA_HD_AUDIO, // 0x040300 204 + MULTIMEDIA_OTHER = bindings::PCI_CLASS_MULTIMEDIA_OTHER, // 0x048000 205 + 206 + MEMORY_RAM = bindings::PCI_CLASS_MEMORY_RAM, // 0x050000 207 + MEMORY_FLASH = bindings::PCI_CLASS_MEMORY_FLASH, // 0x050100 208 + 
MEMORY_CXL = bindings::PCI_CLASS_MEMORY_CXL, // 0x050200 209 + MEMORY_OTHER = bindings::PCI_CLASS_MEMORY_OTHER, // 0x058000 210 + 211 + BRIDGE_HOST = bindings::PCI_CLASS_BRIDGE_HOST, // 0x060000 212 + BRIDGE_ISA = bindings::PCI_CLASS_BRIDGE_ISA, // 0x060100 213 + BRIDGE_EISA = bindings::PCI_CLASS_BRIDGE_EISA, // 0x060200 214 + BRIDGE_MC = bindings::PCI_CLASS_BRIDGE_MC, // 0x060300 215 + BRIDGE_PCI_NORMAL = bindings::PCI_CLASS_BRIDGE_PCI_NORMAL, // 0x060400 216 + BRIDGE_PCI_SUBTRACTIVE = bindings::PCI_CLASS_BRIDGE_PCI_SUBTRACTIVE, // 0x060401 217 + BRIDGE_PCMCIA = bindings::PCI_CLASS_BRIDGE_PCMCIA, // 0x060500 218 + BRIDGE_NUBUS = bindings::PCI_CLASS_BRIDGE_NUBUS, // 0x060600 219 + BRIDGE_CARDBUS = bindings::PCI_CLASS_BRIDGE_CARDBUS, // 0x060700 220 + BRIDGE_RACEWAY = bindings::PCI_CLASS_BRIDGE_RACEWAY, // 0x060800 221 + BRIDGE_OTHER = bindings::PCI_CLASS_BRIDGE_OTHER, // 0x068000 222 + 223 + COMMUNICATION_SERIAL = bindings::PCI_CLASS_COMMUNICATION_SERIAL, // 0x070000 224 + COMMUNICATION_PARALLEL = bindings::PCI_CLASS_COMMUNICATION_PARALLEL, // 0x070100 225 + COMMUNICATION_MULTISERIAL = bindings::PCI_CLASS_COMMUNICATION_MULTISERIAL, // 0x070200 226 + COMMUNICATION_MODEM = bindings::PCI_CLASS_COMMUNICATION_MODEM, // 0x070300 227 + COMMUNICATION_OTHER = bindings::PCI_CLASS_COMMUNICATION_OTHER, // 0x078000 228 + 229 + SYSTEM_PIC = bindings::PCI_CLASS_SYSTEM_PIC, // 0x080000 230 + SYSTEM_PIC_IOAPIC = bindings::PCI_CLASS_SYSTEM_PIC_IOAPIC, // 0x080010 231 + SYSTEM_PIC_IOXAPIC = bindings::PCI_CLASS_SYSTEM_PIC_IOXAPIC, // 0x080020 232 + SYSTEM_DMA = bindings::PCI_CLASS_SYSTEM_DMA, // 0x080100 233 + SYSTEM_TIMER = bindings::PCI_CLASS_SYSTEM_TIMER, // 0x080200 234 + SYSTEM_RTC = bindings::PCI_CLASS_SYSTEM_RTC, // 0x080300 235 + SYSTEM_PCI_HOTPLUG = bindings::PCI_CLASS_SYSTEM_PCI_HOTPLUG, // 0x080400 236 + SYSTEM_SDHCI = bindings::PCI_CLASS_SYSTEM_SDHCI, // 0x080500 237 + SYSTEM_RCEC = bindings::PCI_CLASS_SYSTEM_RCEC, // 0x080700 238 + SYSTEM_OTHER = 
bindings::PCI_CLASS_SYSTEM_OTHER, // 0x088000 239 + 240 + INPUT_KEYBOARD = bindings::PCI_CLASS_INPUT_KEYBOARD, // 0x090000 241 + INPUT_PEN = bindings::PCI_CLASS_INPUT_PEN, // 0x090100 242 + INPUT_MOUSE = bindings::PCI_CLASS_INPUT_MOUSE, // 0x090200 243 + INPUT_SCANNER = bindings::PCI_CLASS_INPUT_SCANNER, // 0x090300 244 + INPUT_GAMEPORT = bindings::PCI_CLASS_INPUT_GAMEPORT, // 0x090400 245 + INPUT_OTHER = bindings::PCI_CLASS_INPUT_OTHER, // 0x098000 246 + 247 + DOCKING_GENERIC = bindings::PCI_CLASS_DOCKING_GENERIC, // 0x0a0000 248 + DOCKING_OTHER = bindings::PCI_CLASS_DOCKING_OTHER, // 0x0a8000 249 + 250 + PROCESSOR_386 = bindings::PCI_CLASS_PROCESSOR_386, // 0x0b0000 251 + PROCESSOR_486 = bindings::PCI_CLASS_PROCESSOR_486, // 0x0b0100 252 + PROCESSOR_PENTIUM = bindings::PCI_CLASS_PROCESSOR_PENTIUM, // 0x0b0200 253 + PROCESSOR_ALPHA = bindings::PCI_CLASS_PROCESSOR_ALPHA, // 0x0b1000 254 + PROCESSOR_POWERPC = bindings::PCI_CLASS_PROCESSOR_POWERPC, // 0x0b2000 255 + PROCESSOR_MIPS = bindings::PCI_CLASS_PROCESSOR_MIPS, // 0x0b3000 256 + PROCESSOR_CO = bindings::PCI_CLASS_PROCESSOR_CO, // 0x0b4000 257 + 258 + SERIAL_FIREWIRE = bindings::PCI_CLASS_SERIAL_FIREWIRE, // 0x0c0000 259 + SERIAL_FIREWIRE_OHCI = bindings::PCI_CLASS_SERIAL_FIREWIRE_OHCI, // 0x0c0010 260 + SERIAL_ACCESS = bindings::PCI_CLASS_SERIAL_ACCESS, // 0x0c0100 261 + SERIAL_SSA = bindings::PCI_CLASS_SERIAL_SSA, // 0x0c0200 262 + SERIAL_USB_UHCI = bindings::PCI_CLASS_SERIAL_USB_UHCI, // 0x0c0300 263 + SERIAL_USB_OHCI = bindings::PCI_CLASS_SERIAL_USB_OHCI, // 0x0c0310 264 + SERIAL_USB_EHCI = bindings::PCI_CLASS_SERIAL_USB_EHCI, // 0x0c0320 265 + SERIAL_USB_XHCI = bindings::PCI_CLASS_SERIAL_USB_XHCI, // 0x0c0330 266 + SERIAL_USB_CDNS = bindings::PCI_CLASS_SERIAL_USB_CDNS, // 0x0c0380 267 + SERIAL_USB_DEVICE = bindings::PCI_CLASS_SERIAL_USB_DEVICE, // 0x0c03fe 268 + SERIAL_FIBER = bindings::PCI_CLASS_SERIAL_FIBER, // 0x0c0400 269 + SERIAL_SMBUS = bindings::PCI_CLASS_SERIAL_SMBUS, // 0x0c0500 270 + 
SERIAL_IPMI_SMIC = bindings::PCI_CLASS_SERIAL_IPMI_SMIC, // 0x0c0700 271 + SERIAL_IPMI_KCS = bindings::PCI_CLASS_SERIAL_IPMI_KCS, // 0x0c0701 272 + SERIAL_IPMI_BT = bindings::PCI_CLASS_SERIAL_IPMI_BT, // 0x0c0702 273 + 274 + WIRELESS_RF_CONTROLLER = bindings::PCI_CLASS_WIRELESS_RF_CONTROLLER, // 0x0d1000 275 + WIRELESS_WHCI = bindings::PCI_CLASS_WIRELESS_WHCI, // 0x0d1010 276 + 277 + INTELLIGENT_I2O = bindings::PCI_CLASS_INTELLIGENT_I2O, // 0x0e0000 278 + 279 + SATELLITE_TV = bindings::PCI_CLASS_SATELLITE_TV, // 0x0f0000 280 + SATELLITE_AUDIO = bindings::PCI_CLASS_SATELLITE_AUDIO, // 0x0f0100 281 + SATELLITE_VOICE = bindings::PCI_CLASS_SATELLITE_VOICE, // 0x0f0300 282 + SATELLITE_DATA = bindings::PCI_CLASS_SATELLITE_DATA, // 0x0f0400 283 + 284 + CRYPT_NETWORK = bindings::PCI_CLASS_CRYPT_NETWORK, // 0x100000 285 + CRYPT_ENTERTAINMENT = bindings::PCI_CLASS_CRYPT_ENTERTAINMENT, // 0x100100 286 + CRYPT_OTHER = bindings::PCI_CLASS_CRYPT_OTHER, // 0x108000 287 + 288 + SP_DPIO = bindings::PCI_CLASS_SP_DPIO, // 0x110000 289 + SP_OTHER = bindings::PCI_CLASS_SP_OTHER, // 0x118000 290 + 291 + ACCELERATOR_PROCESSING = bindings::PCI_CLASS_ACCELERATOR_PROCESSING, // 0x120000 292 + 293 + OTHERS = bindings::PCI_CLASS_OTHERS, // 0xff0000 294 + } 295 + 296 + define_all_pci_vendors! 
{ 297 + PCI_SIG = bindings::PCI_VENDOR_ID_PCI_SIG, // 0x0001 298 + LOONGSON = bindings::PCI_VENDOR_ID_LOONGSON, // 0x0014 299 + SOLIDIGM = bindings::PCI_VENDOR_ID_SOLIDIGM, // 0x025e 300 + TTTECH = bindings::PCI_VENDOR_ID_TTTECH, // 0x0357 301 + DYNALINK = bindings::PCI_VENDOR_ID_DYNALINK, // 0x0675 302 + UBIQUITI = bindings::PCI_VENDOR_ID_UBIQUITI, // 0x0777 303 + BERKOM = bindings::PCI_VENDOR_ID_BERKOM, // 0x0871 304 + ITTIM = bindings::PCI_VENDOR_ID_ITTIM, // 0x0b48 305 + COMPAQ = bindings::PCI_VENDOR_ID_COMPAQ, // 0x0e11 306 + LSI_LOGIC = bindings::PCI_VENDOR_ID_LSI_LOGIC, // 0x1000 307 + ATI = bindings::PCI_VENDOR_ID_ATI, // 0x1002 308 + VLSI = bindings::PCI_VENDOR_ID_VLSI, // 0x1004 309 + ADL = bindings::PCI_VENDOR_ID_ADL, // 0x1005 310 + NS = bindings::PCI_VENDOR_ID_NS, // 0x100b 311 + TSENG = bindings::PCI_VENDOR_ID_TSENG, // 0x100c 312 + WEITEK = bindings::PCI_VENDOR_ID_WEITEK, // 0x100e 313 + DEC = bindings::PCI_VENDOR_ID_DEC, // 0x1011 314 + CIRRUS = bindings::PCI_VENDOR_ID_CIRRUS, // 0x1013 315 + IBM = bindings::PCI_VENDOR_ID_IBM, // 0x1014 316 + UNISYS = bindings::PCI_VENDOR_ID_UNISYS, // 0x1018 317 + COMPEX2 = bindings::PCI_VENDOR_ID_COMPEX2, // 0x101a 318 + WD = bindings::PCI_VENDOR_ID_WD, // 0x101c 319 + AMI = bindings::PCI_VENDOR_ID_AMI, // 0x101e 320 + AMD = bindings::PCI_VENDOR_ID_AMD, // 0x1022 321 + TRIDENT = bindings::PCI_VENDOR_ID_TRIDENT, // 0x1023 322 + AI = bindings::PCI_VENDOR_ID_AI, // 0x1025 323 + DELL = bindings::PCI_VENDOR_ID_DELL, // 0x1028 324 + MATROX = bindings::PCI_VENDOR_ID_MATROX, // 0x102B 325 + MOBILITY_ELECTRONICS = bindings::PCI_VENDOR_ID_MOBILITY_ELECTRONICS, // 0x14f2 326 + CT = bindings::PCI_VENDOR_ID_CT, // 0x102c 327 + MIRO = bindings::PCI_VENDOR_ID_MIRO, // 0x1031 328 + NEC = bindings::PCI_VENDOR_ID_NEC, // 0x1033 329 + FD = bindings::PCI_VENDOR_ID_FD, // 0x1036 330 + SI = bindings::PCI_VENDOR_ID_SI, // 0x1039 331 + HP = bindings::PCI_VENDOR_ID_HP, // 0x103c 332 + HP_3PAR = bindings::PCI_VENDOR_ID_HP_3PAR, // 0x1590 
333 + PCTECH = bindings::PCI_VENDOR_ID_PCTECH, // 0x1042 334 + ASUSTEK = bindings::PCI_VENDOR_ID_ASUSTEK, // 0x1043 335 + DPT = bindings::PCI_VENDOR_ID_DPT, // 0x1044 336 + OPTI = bindings::PCI_VENDOR_ID_OPTI, // 0x1045 337 + ELSA = bindings::PCI_VENDOR_ID_ELSA, // 0x1048 338 + STMICRO = bindings::PCI_VENDOR_ID_STMICRO, // 0x104A 339 + BUSLOGIC = bindings::PCI_VENDOR_ID_BUSLOGIC, // 0x104B 340 + TI = bindings::PCI_VENDOR_ID_TI, // 0x104c 341 + SONY = bindings::PCI_VENDOR_ID_SONY, // 0x104d 342 + WINBOND2 = bindings::PCI_VENDOR_ID_WINBOND2, // 0x1050 343 + ANIGMA = bindings::PCI_VENDOR_ID_ANIGMA, // 0x1051 344 + EFAR = bindings::PCI_VENDOR_ID_EFAR, // 0x1055 345 + MOTOROLA = bindings::PCI_VENDOR_ID_MOTOROLA, // 0x1057 346 + PROMISE = bindings::PCI_VENDOR_ID_PROMISE, // 0x105a 347 + FOXCONN = bindings::PCI_VENDOR_ID_FOXCONN, // 0x105b 348 + UMC = bindings::PCI_VENDOR_ID_UMC, // 0x1060 349 + PICOPOWER = bindings::PCI_VENDOR_ID_PICOPOWER, // 0x1066 350 + MYLEX = bindings::PCI_VENDOR_ID_MYLEX, // 0x1069 351 + APPLE = bindings::PCI_VENDOR_ID_APPLE, // 0x106b 352 + YAMAHA = bindings::PCI_VENDOR_ID_YAMAHA, // 0x1073 353 + QLOGIC = bindings::PCI_VENDOR_ID_QLOGIC, // 0x1077 354 + CYRIX = bindings::PCI_VENDOR_ID_CYRIX, // 0x1078 355 + CONTAQ = bindings::PCI_VENDOR_ID_CONTAQ, // 0x1080 356 + OLICOM = bindings::PCI_VENDOR_ID_OLICOM, // 0x108d 357 + SUN = bindings::PCI_VENDOR_ID_SUN, // 0x108e 358 + NI = bindings::PCI_VENDOR_ID_NI, // 0x1093 359 + CMD = bindings::PCI_VENDOR_ID_CMD, // 0x1095 360 + BROOKTREE = bindings::PCI_VENDOR_ID_BROOKTREE, // 0x109e 361 + SGI = bindings::PCI_VENDOR_ID_SGI, // 0x10a9 362 + WINBOND = bindings::PCI_VENDOR_ID_WINBOND, // 0x10ad 363 + PLX = bindings::PCI_VENDOR_ID_PLX, // 0x10b5 364 + MADGE = bindings::PCI_VENDOR_ID_MADGE, // 0x10b6 365 + THREECOM = bindings::PCI_VENDOR_ID_3COM, // 0x10b7 366 + AL = bindings::PCI_VENDOR_ID_AL, // 0x10b9 367 + NEOMAGIC = bindings::PCI_VENDOR_ID_NEOMAGIC, // 0x10c8 368 + TCONRAD = bindings::PCI_VENDOR_ID_TCONRAD, 
// 0x10da 369 + ROHM = bindings::PCI_VENDOR_ID_ROHM, // 0x10db 370 + NVIDIA = bindings::PCI_VENDOR_ID_NVIDIA, // 0x10de 371 + IMS = bindings::PCI_VENDOR_ID_IMS, // 0x10e0 372 + AMCC = bindings::PCI_VENDOR_ID_AMCC, // 0x10e8 373 + AMPERE = bindings::PCI_VENDOR_ID_AMPERE, // 0x1def 374 + INTERG = bindings::PCI_VENDOR_ID_INTERG, // 0x10ea 375 + REALTEK = bindings::PCI_VENDOR_ID_REALTEK, // 0x10ec 376 + XILINX = bindings::PCI_VENDOR_ID_XILINX, // 0x10ee 377 + INIT = bindings::PCI_VENDOR_ID_INIT, // 0x1101 378 + CREATIVE = bindings::PCI_VENDOR_ID_CREATIVE, // 0x1102 379 + TTI = bindings::PCI_VENDOR_ID_TTI, // 0x1103 380 + SIGMA = bindings::PCI_VENDOR_ID_SIGMA, // 0x1105 381 + VIA = bindings::PCI_VENDOR_ID_VIA, // 0x1106 382 + SIEMENS = bindings::PCI_VENDOR_ID_SIEMENS, // 0x110A 383 + VORTEX = bindings::PCI_VENDOR_ID_VORTEX, // 0x1119 384 + EF = bindings::PCI_VENDOR_ID_EF, // 0x111a 385 + IDT = bindings::PCI_VENDOR_ID_IDT, // 0x111d 386 + FORE = bindings::PCI_VENDOR_ID_FORE, // 0x1127 387 + PHILIPS = bindings::PCI_VENDOR_ID_PHILIPS, // 0x1131 388 + EICON = bindings::PCI_VENDOR_ID_EICON, // 0x1133 389 + CISCO = bindings::PCI_VENDOR_ID_CISCO, // 0x1137 390 + ZIATECH = bindings::PCI_VENDOR_ID_ZIATECH, // 0x1138 391 + SYSKONNECT = bindings::PCI_VENDOR_ID_SYSKONNECT, // 0x1148 392 + DIGI = bindings::PCI_VENDOR_ID_DIGI, // 0x114f 393 + XIRCOM = bindings::PCI_VENDOR_ID_XIRCOM, // 0x115d 394 + SERVERWORKS = bindings::PCI_VENDOR_ID_SERVERWORKS, // 0x1166 395 + ALTERA = bindings::PCI_VENDOR_ID_ALTERA, // 0x1172 396 + SBE = bindings::PCI_VENDOR_ID_SBE, // 0x1176 397 + TOSHIBA = bindings::PCI_VENDOR_ID_TOSHIBA, // 0x1179 398 + TOSHIBA_2 = bindings::PCI_VENDOR_ID_TOSHIBA_2, // 0x102f 399 + ATTO = bindings::PCI_VENDOR_ID_ATTO, // 0x117c 400 + RICOH = bindings::PCI_VENDOR_ID_RICOH, // 0x1180 401 + DLINK = bindings::PCI_VENDOR_ID_DLINK, // 0x1186 402 + ARTOP = bindings::PCI_VENDOR_ID_ARTOP, // 0x1191 403 + ZEITNET = bindings::PCI_VENDOR_ID_ZEITNET, // 0x1193 404 + FUJITSU_ME = 
bindings::PCI_VENDOR_ID_FUJITSU_ME, // 0x119e 405 + MARVELL = bindings::PCI_VENDOR_ID_MARVELL, // 0x11ab 406 + MARVELL_EXT = bindings::PCI_VENDOR_ID_MARVELL_EXT, // 0x1b4b 407 + V3 = bindings::PCI_VENDOR_ID_V3, // 0x11b0 408 + ATT = bindings::PCI_VENDOR_ID_ATT, // 0x11c1 409 + SPECIALIX = bindings::PCI_VENDOR_ID_SPECIALIX, // 0x11cb 410 + ANALOG_DEVICES = bindings::PCI_VENDOR_ID_ANALOG_DEVICES, // 0x11d4 411 + ZORAN = bindings::PCI_VENDOR_ID_ZORAN, // 0x11de 412 + COMPEX = bindings::PCI_VENDOR_ID_COMPEX, // 0x11f6 413 + MICROSEMI = bindings::PCI_VENDOR_ID_MICROSEMI, // 0x11f8 414 + RP = bindings::PCI_VENDOR_ID_RP, // 0x11fe 415 + CYCLADES = bindings::PCI_VENDOR_ID_CYCLADES, // 0x120e 416 + ESSENTIAL = bindings::PCI_VENDOR_ID_ESSENTIAL, // 0x120f 417 + O2 = bindings::PCI_VENDOR_ID_O2, // 0x1217 418 + THREEDX = bindings::PCI_VENDOR_ID_3DFX, // 0x121a 419 + AVM = bindings::PCI_VENDOR_ID_AVM, // 0x1244 420 + STALLION = bindings::PCI_VENDOR_ID_STALLION, // 0x124d 421 + AT = bindings::PCI_VENDOR_ID_AT, // 0x1259 422 + ASIX = bindings::PCI_VENDOR_ID_ASIX, // 0x125b 423 + ESS = bindings::PCI_VENDOR_ID_ESS, // 0x125d 424 + SATSAGEM = bindings::PCI_VENDOR_ID_SATSAGEM, // 0x1267 425 + ENSONIQ = bindings::PCI_VENDOR_ID_ENSONIQ, // 0x1274 426 + TRANSMETA = bindings::PCI_VENDOR_ID_TRANSMETA, // 0x1279 427 + ROCKWELL = bindings::PCI_VENDOR_ID_ROCKWELL, // 0x127A 428 + ITE = bindings::PCI_VENDOR_ID_ITE, // 0x1283 429 + ALTEON = bindings::PCI_VENDOR_ID_ALTEON, // 0x12ae 430 + NVIDIA_SGS = bindings::PCI_VENDOR_ID_NVIDIA_SGS, // 0x12d2 431 + PERICOM = bindings::PCI_VENDOR_ID_PERICOM, // 0x12D8 432 + AUREAL = bindings::PCI_VENDOR_ID_AUREAL, // 0x12eb 433 + ELECTRONICDESIGNGMBH = bindings::PCI_VENDOR_ID_ELECTRONICDESIGNGMBH, // 0x12f8 434 + ESDGMBH = bindings::PCI_VENDOR_ID_ESDGMBH, // 0x12fe 435 + CB = bindings::PCI_VENDOR_ID_CB, // 0x1307 436 + SIIG = bindings::PCI_VENDOR_ID_SIIG, // 0x131f 437 + RADISYS = bindings::PCI_VENDOR_ID_RADISYS, // 0x1331 438 + MICRO_MEMORY = 
bindings::PCI_VENDOR_ID_MICRO_MEMORY, // 0x1332 439 + DOMEX = bindings::PCI_VENDOR_ID_DOMEX, // 0x134a 440 + INTASHIELD = bindings::PCI_VENDOR_ID_INTASHIELD, // 0x135a 441 + QUATECH = bindings::PCI_VENDOR_ID_QUATECH, // 0x135C 442 + SEALEVEL = bindings::PCI_VENDOR_ID_SEALEVEL, // 0x135e 443 + HYPERCOPE = bindings::PCI_VENDOR_ID_HYPERCOPE, // 0x1365 444 + DIGIGRAM = bindings::PCI_VENDOR_ID_DIGIGRAM, // 0x1369 445 + KAWASAKI = bindings::PCI_VENDOR_ID_KAWASAKI, // 0x136b 446 + CNET = bindings::PCI_VENDOR_ID_CNET, // 0x1371 447 + LMC = bindings::PCI_VENDOR_ID_LMC, // 0x1376 448 + NETGEAR = bindings::PCI_VENDOR_ID_NETGEAR, // 0x1385 449 + APPLICOM = bindings::PCI_VENDOR_ID_APPLICOM, // 0x1389 450 + MOXA = bindings::PCI_VENDOR_ID_MOXA, // 0x1393 451 + CCD = bindings::PCI_VENDOR_ID_CCD, // 0x1397 452 + EXAR = bindings::PCI_VENDOR_ID_EXAR, // 0x13a8 453 + MICROGATE = bindings::PCI_VENDOR_ID_MICROGATE, // 0x13c0 454 + THREEWARE = bindings::PCI_VENDOR_ID_3WARE, // 0x13C1 455 + IOMEGA = bindings::PCI_VENDOR_ID_IOMEGA, // 0x13ca 456 + ABOCOM = bindings::PCI_VENDOR_ID_ABOCOM, // 0x13D1 457 + SUNDANCE = bindings::PCI_VENDOR_ID_SUNDANCE, // 0x13f0 458 + CMEDIA = bindings::PCI_VENDOR_ID_CMEDIA, // 0x13f6 459 + ADVANTECH = bindings::PCI_VENDOR_ID_ADVANTECH, // 0x13fe 460 + MEILHAUS = bindings::PCI_VENDOR_ID_MEILHAUS, // 0x1402 461 + LAVA = bindings::PCI_VENDOR_ID_LAVA, // 0x1407 462 + TIMEDIA = bindings::PCI_VENDOR_ID_TIMEDIA, // 0x1409 463 + ICE = bindings::PCI_VENDOR_ID_ICE, // 0x1412 464 + MICROSOFT = bindings::PCI_VENDOR_ID_MICROSOFT, // 0x1414 465 + OXSEMI = bindings::PCI_VENDOR_ID_OXSEMI, // 0x1415 466 + CHELSIO = bindings::PCI_VENDOR_ID_CHELSIO, // 0x1425 467 + EDIMAX = bindings::PCI_VENDOR_ID_EDIMAX, // 0x1432 468 + ADLINK = bindings::PCI_VENDOR_ID_ADLINK, // 0x144a 469 + SAMSUNG = bindings::PCI_VENDOR_ID_SAMSUNG, // 0x144d 470 + GIGABYTE = bindings::PCI_VENDOR_ID_GIGABYTE, // 0x1458 471 + AMBIT = bindings::PCI_VENDOR_ID_AMBIT, // 0x1468 472 + MYRICOM = 
bindings::PCI_VENDOR_ID_MYRICOM, // 0x14c1 473 + MEDIATEK = bindings::PCI_VENDOR_ID_MEDIATEK, // 0x14c3 474 + TITAN = bindings::PCI_VENDOR_ID_TITAN, // 0x14D2 475 + PANACOM = bindings::PCI_VENDOR_ID_PANACOM, // 0x14d4 476 + SIPACKETS = bindings::PCI_VENDOR_ID_SIPACKETS, // 0x14d9 477 + AFAVLAB = bindings::PCI_VENDOR_ID_AFAVLAB, // 0x14db 478 + AMPLICON = bindings::PCI_VENDOR_ID_AMPLICON, // 0x14dc 479 + BCM_GVC = bindings::PCI_VENDOR_ID_BCM_GVC, // 0x14a4 480 + BROADCOM = bindings::PCI_VENDOR_ID_BROADCOM, // 0x14e4 481 + TOPIC = bindings::PCI_VENDOR_ID_TOPIC, // 0x151f 482 + MAINPINE = bindings::PCI_VENDOR_ID_MAINPINE, // 0x1522 483 + ENE = bindings::PCI_VENDOR_ID_ENE, // 0x1524 484 + SYBA = bindings::PCI_VENDOR_ID_SYBA, // 0x1592 485 + MORETON = bindings::PCI_VENDOR_ID_MORETON, // 0x15aa 486 + VMWARE = bindings::PCI_VENDOR_ID_VMWARE, // 0x15ad 487 + ZOLTRIX = bindings::PCI_VENDOR_ID_ZOLTRIX, // 0x15b0 488 + MELLANOX = bindings::PCI_VENDOR_ID_MELLANOX, // 0x15b3 489 + DFI = bindings::PCI_VENDOR_ID_DFI, // 0x15bd 490 + QUICKNET = bindings::PCI_VENDOR_ID_QUICKNET, // 0x15e2 491 + ADDIDATA = bindings::PCI_VENDOR_ID_ADDIDATA, // 0x15B8 492 + PDC = bindings::PCI_VENDOR_ID_PDC, // 0x15e9 493 + FARSITE = bindings::PCI_VENDOR_ID_FARSITE, // 0x1619 494 + ARIMA = bindings::PCI_VENDOR_ID_ARIMA, // 0x161f 495 + BROCADE = bindings::PCI_VENDOR_ID_BROCADE, // 0x1657 496 + SIBYTE = bindings::PCI_VENDOR_ID_SIBYTE, // 0x166d 497 + ATHEROS = bindings::PCI_VENDOR_ID_ATHEROS, // 0x168c 498 + NETCELL = bindings::PCI_VENDOR_ID_NETCELL, // 0x169c 499 + CENATEK = bindings::PCI_VENDOR_ID_CENATEK, // 0x16CA 500 + SYNOPSYS = bindings::PCI_VENDOR_ID_SYNOPSYS, // 0x16c3 501 + USR = bindings::PCI_VENDOR_ID_USR, // 0x16ec 502 + VITESSE = bindings::PCI_VENDOR_ID_VITESSE, // 0x1725 503 + LINKSYS = bindings::PCI_VENDOR_ID_LINKSYS, // 0x1737 504 + ALTIMA = bindings::PCI_VENDOR_ID_ALTIMA, // 0x173b 505 + CAVIUM = bindings::PCI_VENDOR_ID_CAVIUM, // 0x177d 506 + TECHWELL = 
bindings::PCI_VENDOR_ID_TECHWELL, // 0x1797 507 + BELKIN = bindings::PCI_VENDOR_ID_BELKIN, // 0x1799 508 + RDC = bindings::PCI_VENDOR_ID_RDC, // 0x17f3 509 + GLI = bindings::PCI_VENDOR_ID_GLI, // 0x17a0 510 + LENOVO = bindings::PCI_VENDOR_ID_LENOVO, // 0x17aa 511 + QCOM = bindings::PCI_VENDOR_ID_QCOM, // 0x17cb 512 + CDNS = bindings::PCI_VENDOR_ID_CDNS, // 0x17cd 513 + ARECA = bindings::PCI_VENDOR_ID_ARECA, // 0x17d3 514 + S2IO = bindings::PCI_VENDOR_ID_S2IO, // 0x17d5 515 + SITECOM = bindings::PCI_VENDOR_ID_SITECOM, // 0x182d 516 + TOPSPIN = bindings::PCI_VENDOR_ID_TOPSPIN, // 0x1867 517 + COMMTECH = bindings::PCI_VENDOR_ID_COMMTECH, // 0x18f7 518 + SILAN = bindings::PCI_VENDOR_ID_SILAN, // 0x1904 519 + RENESAS = bindings::PCI_VENDOR_ID_RENESAS, // 0x1912 520 + SOLARFLARE = bindings::PCI_VENDOR_ID_SOLARFLARE, // 0x1924 521 + TDI = bindings::PCI_VENDOR_ID_TDI, // 0x192E 522 + NXP = bindings::PCI_VENDOR_ID_NXP, // 0x1957 523 + PASEMI = bindings::PCI_VENDOR_ID_PASEMI, // 0x1959 524 + ATTANSIC = bindings::PCI_VENDOR_ID_ATTANSIC, // 0x1969 525 + JMICRON = bindings::PCI_VENDOR_ID_JMICRON, // 0x197B 526 + KORENIX = bindings::PCI_VENDOR_ID_KORENIX, // 0x1982 527 + HUAWEI = bindings::PCI_VENDOR_ID_HUAWEI, // 0x19e5 528 + NETRONOME = bindings::PCI_VENDOR_ID_NETRONOME, // 0x19ee 529 + QMI = bindings::PCI_VENDOR_ID_QMI, // 0x1a32 530 + AZWAVE = bindings::PCI_VENDOR_ID_AZWAVE, // 0x1a3b 531 + REDHAT_QUMRANET = bindings::PCI_VENDOR_ID_REDHAT_QUMRANET, // 0x1af4 532 + ASMEDIA = bindings::PCI_VENDOR_ID_ASMEDIA, // 0x1b21 533 + REDHAT = bindings::PCI_VENDOR_ID_REDHAT, // 0x1b36 534 + WCHIC = bindings::PCI_VENDOR_ID_WCHIC, // 0x1c00 535 + SILICOM_DENMARK = bindings::PCI_VENDOR_ID_SILICOM_DENMARK, // 0x1c2c 536 + AMAZON_ANNAPURNA_LABS = bindings::PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, // 0x1c36 537 + CIRCUITCO = bindings::PCI_VENDOR_ID_CIRCUITCO, // 0x1cc8 538 + AMAZON = bindings::PCI_VENDOR_ID_AMAZON, // 0x1d0f 539 + ZHAOXIN = bindings::PCI_VENDOR_ID_ZHAOXIN, // 0x1d17 540 + ROCKCHIP 
= bindings::PCI_VENDOR_ID_ROCKCHIP, // 0x1d87 541 + HYGON = bindings::PCI_VENDOR_ID_HYGON, // 0x1d94 542 + META = bindings::PCI_VENDOR_ID_META, // 0x1d9b 543 + FUNGIBLE = bindings::PCI_VENDOR_ID_FUNGIBLE, // 0x1dad 544 + HXT = bindings::PCI_VENDOR_ID_HXT, // 0x1dbf 545 + TEKRAM = bindings::PCI_VENDOR_ID_TEKRAM, // 0x1de1 546 + RPI = bindings::PCI_VENDOR_ID_RPI, // 0x1de4 547 + ALIBABA = bindings::PCI_VENDOR_ID_ALIBABA, // 0x1ded 548 + CXL = bindings::PCI_VENDOR_ID_CXL, // 0x1e98 549 + TEHUTI = bindings::PCI_VENDOR_ID_TEHUTI, // 0x1fc9 550 + SUNIX = bindings::PCI_VENDOR_ID_SUNIX, // 0x1fd4 551 + HINT = bindings::PCI_VENDOR_ID_HINT, // 0x3388 552 + THREEDLABS = bindings::PCI_VENDOR_ID_3DLABS, // 0x3d3d 553 + NETXEN = bindings::PCI_VENDOR_ID_NETXEN, // 0x4040 554 + AKS = bindings::PCI_VENDOR_ID_AKS, // 0x416c 555 + WCHCN = bindings::PCI_VENDOR_ID_WCHCN, // 0x4348 556 + ACCESSIO = bindings::PCI_VENDOR_ID_ACCESSIO, // 0x494f 557 + S3 = bindings::PCI_VENDOR_ID_S3, // 0x5333 558 + DUNORD = bindings::PCI_VENDOR_ID_DUNORD, // 0x5544 559 + DCI = bindings::PCI_VENDOR_ID_DCI, // 0x6666 560 + GLENFLY = bindings::PCI_VENDOR_ID_GLENFLY, // 0x6766 561 + INTEL = bindings::PCI_VENDOR_ID_INTEL, // 0x8086 562 + WANGXUN = bindings::PCI_VENDOR_ID_WANGXUN, // 0x8088 563 + SCALEMP = bindings::PCI_VENDOR_ID_SCALEMP, // 0x8686 564 + COMPUTONE = bindings::PCI_VENDOR_ID_COMPUTONE, // 0x8e0e 565 + KTI = bindings::PCI_VENDOR_ID_KTI, // 0x8e2e 566 + ADAPTEC = bindings::PCI_VENDOR_ID_ADAPTEC, // 0x9004 567 + ADAPTEC2 = bindings::PCI_VENDOR_ID_ADAPTEC2, // 0x9005 568 + HOLTEK = bindings::PCI_VENDOR_ID_HOLTEK, // 0x9412 569 + NETMOS = bindings::PCI_VENDOR_ID_NETMOS, // 0x9710 570 + THREECOM_2 = bindings::PCI_VENDOR_ID_3COM_2, // 0xa727 571 + SOLIDRUN = bindings::PCI_VENDOR_ID_SOLIDRUN, // 0xd063 572 + DIGIUM = bindings::PCI_VENDOR_ID_DIGIUM, // 0xd161 573 + TIGERJET = bindings::PCI_VENDOR_ID_TIGERJET, // 0xe159 574 + XILINX_RME = bindings::PCI_VENDOR_ID_XILINX_RME, // 0xea60 575 + XEN = 
bindings::PCI_VENDOR_ID_XEN, // 0x5853 576 + OCZ = bindings::PCI_VENDOR_ID_OCZ, // 0x1b85 577 + NCUBE = bindings::PCI_VENDOR_ID_NCUBE, // 0x10ff 578 + }
+177 -1
rust/kernel/platform.rs
··· 10 10 driver, 11 11 error::{from_result, to_result, Result}, 12 12 io::{mem::IoRequest, Resource}, 13 + irq::{self, IrqRequest}, 13 14 of, 14 15 prelude::*, 15 16 types::Opaque, ··· 285 284 } 286 285 } 287 286 287 + macro_rules! define_irq_accessor_by_index { 288 + ( 289 + $(#[$meta:meta])* $fn_name:ident, 290 + $request_fn:ident, 291 + $reg_type:ident, 292 + $handler_trait:ident 293 + ) => { 294 + $(#[$meta])* 295 + pub fn $fn_name<'a, T: irq::$handler_trait + 'static>( 296 + &'a self, 297 + flags: irq::Flags, 298 + index: u32, 299 + name: &'static CStr, 300 + handler: impl PinInit<T, Error> + 'a, 301 + ) -> Result<impl PinInit<irq::$reg_type<T>, Error> + 'a> { 302 + let request = self.$request_fn(index)?; 303 + 304 + Ok(irq::$reg_type::<T>::new( 305 + request, 306 + flags, 307 + name, 308 + handler, 309 + )) 310 + } 311 + }; 312 + } 313 + 314 + macro_rules! define_irq_accessor_by_name { 315 + ( 316 + $(#[$meta:meta])* $fn_name:ident, 317 + $request_fn:ident, 318 + $reg_type:ident, 319 + $handler_trait:ident 320 + ) => { 321 + $(#[$meta])* 322 + pub fn $fn_name<'a, T: irq::$handler_trait + 'static>( 323 + &'a self, 324 + flags: irq::Flags, 325 + irq_name: &CStr, 326 + name: &'static CStr, 327 + handler: impl PinInit<T, Error> + 'a, 328 + ) -> Result<impl PinInit<irq::$reg_type<T>, Error> + 'a> { 329 + let request = self.$request_fn(irq_name)?; 330 + 331 + Ok(irq::$reg_type::<T>::new( 332 + request, 333 + flags, 334 + name, 335 + handler, 336 + )) 337 + } 338 + }; 339 + } 340 + 341 + impl Device<Bound> { 342 + /// Returns an [`IrqRequest`] for the IRQ at the given index, if any. 343 + pub fn irq_by_index(&self, index: u32) -> Result<IrqRequest<'_>> { 344 + // SAFETY: `self.as_raw` returns a valid pointer to a `struct platform_device`. 
345 + let irq = unsafe { bindings::platform_get_irq(self.as_raw(), index) }; 346 + 347 + if irq < 0 { 348 + return Err(Error::from_errno(irq)); 349 + } 350 + 351 + // SAFETY: `irq` is guaranteed to be a valid IRQ number for `&self`. 352 + Ok(unsafe { IrqRequest::new(self.as_ref(), irq as u32) }) 353 + } 354 + 355 + /// Returns an [`IrqRequest`] for the IRQ at the given index, but does not 356 + /// print an error if the IRQ cannot be obtained. 357 + pub fn optional_irq_by_index(&self, index: u32) -> Result<IrqRequest<'_>> { 358 + // SAFETY: `self.as_raw` returns a valid pointer to a `struct platform_device`. 359 + let irq = unsafe { bindings::platform_get_irq_optional(self.as_raw(), index) }; 360 + 361 + if irq < 0 { 362 + return Err(Error::from_errno(irq)); 363 + } 364 + 365 + // SAFETY: `irq` is guaranteed to be a valid IRQ number for `&self`. 366 + Ok(unsafe { IrqRequest::new(self.as_ref(), irq as u32) }) 367 + } 368 + 369 + /// Returns an [`IrqRequest`] for the IRQ with the given name, if any. 370 + pub fn irq_by_name(&self, name: &CStr) -> Result<IrqRequest<'_>> { 371 + // SAFETY: `self.as_raw` returns a valid pointer to a `struct platform_device`. 372 + let irq = unsafe { bindings::platform_get_irq_byname(self.as_raw(), name.as_char_ptr()) }; 373 + 374 + if irq < 0 { 375 + return Err(Error::from_errno(irq)); 376 + } 377 + 378 + // SAFETY: `irq` is guaranteed to be a valid IRQ number for `&self`. 379 + Ok(unsafe { IrqRequest::new(self.as_ref(), irq as u32) }) 380 + } 381 + 382 + /// Returns an [`IrqRequest`] for the IRQ with the given name, but does not 383 + /// print an error if the IRQ cannot be obtained. 384 + pub fn optional_irq_by_name(&self, name: &CStr) -> Result<IrqRequest<'_>> { 385 + // SAFETY: `self.as_raw` returns a valid pointer to a `struct platform_device`. 
386 + let irq = unsafe { 387 + bindings::platform_get_irq_byname_optional(self.as_raw(), name.as_char_ptr()) 388 + }; 389 + 390 + if irq < 0 { 391 + return Err(Error::from_errno(irq)); 392 + } 393 + 394 + // SAFETY: `irq` is guaranteed to be a valid IRQ number for `&self`. 395 + Ok(unsafe { IrqRequest::new(self.as_ref(), irq as u32) }) 396 + } 397 + 398 + define_irq_accessor_by_index!( 399 + /// Returns a [`irq::Registration`] for the IRQ at the given index. 400 + request_irq_by_index, 401 + irq_by_index, 402 + Registration, 403 + Handler 404 + ); 405 + define_irq_accessor_by_name!( 406 + /// Returns a [`irq::Registration`] for the IRQ with the given name. 407 + request_irq_by_name, 408 + irq_by_name, 409 + Registration, 410 + Handler 411 + ); 412 + define_irq_accessor_by_index!( 413 + /// Does the same as [`Self::request_irq_by_index`], except that it does 414 + /// not print an error message if the IRQ cannot be obtained. 415 + request_optional_irq_by_index, 416 + optional_irq_by_index, 417 + Registration, 418 + Handler 419 + ); 420 + define_irq_accessor_by_name!( 421 + /// Does the same as [`Self::request_irq_by_name`], except that it does 422 + /// not print an error message if the IRQ cannot be obtained. 423 + request_optional_irq_by_name, 424 + optional_irq_by_name, 425 + Registration, 426 + Handler 427 + ); 428 + 429 + define_irq_accessor_by_index!( 430 + /// Returns a [`irq::ThreadedRegistration`] for the IRQ at the given index. 431 + request_threaded_irq_by_index, 432 + irq_by_index, 433 + ThreadedRegistration, 434 + ThreadedHandler 435 + ); 436 + define_irq_accessor_by_name!( 437 + /// Returns a [`irq::ThreadedRegistration`] for the IRQ with the given name. 
438 + request_threaded_irq_by_name, 439 + irq_by_name, 440 + ThreadedRegistration, 441 + ThreadedHandler 442 + ); 443 + define_irq_accessor_by_index!( 444 + /// Does the same as [`Self::request_threaded_irq_by_index`], except 445 + /// that it does not print an error message if the IRQ cannot be 446 + /// obtained. 447 + request_optional_threaded_irq_by_index, 448 + optional_irq_by_index, 449 + ThreadedRegistration, 450 + ThreadedHandler 451 + ); 452 + define_irq_accessor_by_name!( 453 + /// Does the same as [`Self::request_threaded_irq_by_name`], except that 454 + /// it does not print an error message if the IRQ cannot be obtained. 455 + request_optional_threaded_irq_by_name, 456 + optional_irq_by_name, 457 + ThreadedRegistration, 458 + ThreadedHandler 459 + ); 460 + } 461 + 288 462 // SAFETY: `Device` is a transparent wrapper of a type that doesn't depend on `Device`'s generic 289 463 // argument. 290 464 kernel::impl_device_context_deref!(unsafe { Device }); ··· 468 292 impl crate::dma::Device for Device<device::Core> {} 469 293 470 294 // SAFETY: Instances of `Device` are always reference-counted. 471 - unsafe impl crate::types::AlwaysRefCounted for Device { 295 + unsafe impl crate::sync::aref::AlwaysRefCounted for Device { 472 296 fn inc_ref(&self) { 473 297 // SAFETY: The existence of a shared reference guarantees that the refcount is non-zero. 474 298 unsafe { bindings::get_device(self.as_ref().as_raw()) };
+14
rust/kernel/processor.rs
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + //! Processor related primitives. 4 + //! 5 + //! C header: [`include/linux/processor.h`](srctree/include/linux/processor.h) 6 + 7 + /// Lower CPU power consumption or yield to a hyperthreaded twin processor. 8 + /// 9 + /// It also happens to serve as a compiler barrier. 10 + #[inline] 11 + pub fn cpu_relax() { 12 + // SAFETY: Always safe to call. 13 + unsafe { bindings::cpu_relax() } 14 + }
+22
samples/rust/Kconfig
··· 62 62 63 63 If unsure, say N. 64 64 65 + config SAMPLE_RUST_DEBUGFS 66 + tristate "DebugFS Test Module" 67 + depends on DEBUG_FS 68 + help 69 + This option builds the Rust DebugFS Test module sample. 70 + 71 + To compile this as a module, choose M here: 72 + the module will be called rust_debugfs. 73 + 74 + If unsure, say N. 75 + 76 + config SAMPLE_RUST_DEBUGFS_SCOPED 77 + tristate "Scoped DebugFS Test Module" 78 + depends on DEBUG_FS 79 + help 80 + This option builds the Rust Scoped DebugFS Test module sample. 81 + 82 + To compile this as a module, choose M here: 83 + the module will be called rust_debugfs_scoped. 84 + 85 + If unsure, say N. 86 + 65 87 config SAMPLE_RUST_DRIVER_PCI 66 88 tristate "PCI Driver" 67 89 depends on PCI
+2
samples/rust/Makefile
··· 4 4 obj-$(CONFIG_SAMPLE_RUST_MINIMAL) += rust_minimal.o 5 5 obj-$(CONFIG_SAMPLE_RUST_MISC_DEVICE) += rust_misc_device.o 6 6 obj-$(CONFIG_SAMPLE_RUST_PRINT) += rust_print.o 7 + obj-$(CONFIG_SAMPLE_RUST_DEBUGFS) += rust_debugfs.o 8 + obj-$(CONFIG_SAMPLE_RUST_DEBUGFS_SCOPED) += rust_debugfs_scoped.o 7 9 obj-$(CONFIG_SAMPLE_RUST_DMA) += rust_dma.o 8 10 obj-$(CONFIG_SAMPLE_RUST_DRIVER_PCI) += rust_driver_pci.o 9 11 obj-$(CONFIG_SAMPLE_RUST_DRIVER_PLATFORM) += rust_driver_platform.o
+151
samples/rust/rust_debugfs.rs
// SPDX-License-Identifier: GPL-2.0

// Copyright (C) 2025 Google LLC.

//! Sample DebugFS exporting platform driver
//!
//! To successfully probe this driver with ACPI, use an ssdt that looks like
//!
//! ```dsl
//! DefinitionBlock ("", "SSDT", 2, "TEST", "VIRTACPI", 0x00000001)
//! {
//!     Scope (\_SB)
//!     {
//!         Device (T432)
//!         {
//!             Name (_HID, "LNUXBEEF")  // ACPI hardware ID to match
//!             Name (_UID, 1)
//!             Name (_STA, 0x0F)        // Device present, enabled
//!             Name (_DSD, Package () { // Sample attribute
//!                 ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
//!                 Package() {
//!                     Package(2) {"compatible", "sample-debugfs"}
//!                 }
//!             })
//!             Name (_CRS, ResourceTemplate ()
//!             {
//!                 Memory32Fixed (ReadWrite, 0xFED00000, 0x1000)
//!             })
//!         }
//!     }
//! }
//! ```

use core::str::FromStr;
use core::sync::atomic::AtomicUsize;
use core::sync::atomic::Ordering;
use kernel::c_str;
use kernel::debugfs::{Dir, File};
use kernel::new_mutex;
use kernel::prelude::*;
use kernel::sync::Mutex;

use kernel::{acpi, device::Core, of, platform, str::CString, types::ARef};

kernel::module_platform_driver! {
    type: RustDebugFs,
    name: "rust_debugfs",
    authors: ["Matthew Maurer"],
    description: "Rust DebugFS usage sample",
    license: "GPL",
}

/// Per-device driver data; owns the debugfs directory and the files in it.
#[pin_data]
struct RustDebugFs {
    pdev: ARef<platform::Device>,
    // As we only hold these for drop effect (to remove the directory/files) we have a leading
    // underscore to indicate to the compiler that we don't expect to use this field directly.
    _debugfs: Dir,
    #[pin]
    _compatible: File<CString>,
    // Exposed read/write via debugfs; mutated through atomic/mutexed access below.
    #[pin]
    counter: File<AtomicUsize>,
    #[pin]
    inner: File<Mutex<Inner>>,
}

/// A pair of values exposed through the `pair` debugfs file.
#[derive(Debug)]
struct Inner {
    x: u32,
    y: u32,
}

impl FromStr for Inner {
    type Err = Error;
    /// Parses exactly two whitespace-separated `u32` values ("X Y").
    ///
    /// Returns `EINVAL` on a missing field, a non-numeric field, or trailing
    /// extra fields.
    fn from_str(s: &str) -> Result<Self> {
        let mut parts = s.split_whitespace();
        let x = parts
            .next()
            .ok_or(EINVAL)?
            .parse::<u32>()
            .map_err(|_| EINVAL)?;
        let y = parts
            .next()
            .ok_or(EINVAL)?
            .parse::<u32>()
            .map_err(|_| EINVAL)?;
        // Reject any trailing garbage after the two expected values.
        if parts.next().is_some() {
            return Err(EINVAL);
        }
        Ok(Inner { x, y })
    }
}

kernel::acpi_device_table!(
    ACPI_TABLE,
    MODULE_ACPI_TABLE,
    <RustDebugFs as platform::Driver>::IdInfo,
    [(acpi::DeviceId::new(c_str!("LNUXBEEF")), ())]
);

impl platform::Driver for RustDebugFs {
    type IdInfo = ();
    const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = None;
    const ACPI_ID_TABLE: Option<acpi::IdTable<Self::IdInfo>> = Some(&ACPI_TABLE);

    fn probe(
        pdev: &platform::Device<Core>,
        _info: Option<&Self::IdInfo>,
    ) -> Result<Pin<KBox<Self>>> {
        let result = KBox::try_pin_init(RustDebugFs::new(pdev), GFP_KERNEL)?;
        // We can still mutate fields through the files which are atomic or mutexed:
        result.counter.store(91, Ordering::Relaxed);
        {
            let mut guard = result.inner.lock();
            guard.x = guard.y;
            guard.y = 42;
        }
        Ok(result)
    }
}

impl RustDebugFs {
    /// Creates the read/write `counter` file inside `dir`, starting at 0.
    fn build_counter(dir: &Dir) -> impl PinInit<File<AtomicUsize>> + '_ {
        dir.read_write_file(c_str!("counter"), AtomicUsize::new(0))
    }

    /// Creates the read/write `pair` file inside `dir` with initial `(3, 10)`.
    fn build_inner(dir: &Dir) -> impl PinInit<File<Mutex<Inner>>> + '_ {
        dir.read_write_file(c_str!("pair"), new_mutex!(Inner { x: 3, y: 10 }))
    }

    /// Pin-initializer for the driver data: creates the `sample_debugfs`
    /// directory and populates it with the `compatible`, `counter` and `pair`
    /// files.
    fn new(pdev: &platform::Device<Core>) -> impl PinInit<Self, Error> + '_ {
        let debugfs = Dir::new(c_str!("sample_debugfs"));
        let dev = pdev.as_ref();

        try_pin_init! {
            Self {
                // Read-only file mirroring the firmware node's "compatible"
                // string property.
                _compatible <- debugfs.read_only_file(
                    c_str!("compatible"),
                    dev.fwnode()
                        .ok_or(ENOENT)?
                        .property_read::<CString>(c_str!("compatible"))
                        .required_by(dev)?,
                ),
                counter <- Self::build_counter(&debugfs),
                inner <- Self::build_inner(&debugfs),
                _debugfs: debugfs,
                pdev: pdev.into(),
            }
        }
    }
}
+134
samples/rust/rust_debugfs_scoped.rs
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + // Copyright (C) 2025 Google LLC. 4 + 5 + //! Sample DebugFS exporting platform driver that demonstrates the use of 6 + //! `Scope::dir` to create a variety of files without the need to separately 7 + //! track them all. 8 + 9 + use core::sync::atomic::AtomicUsize; 10 + use kernel::debugfs::{Dir, Scope}; 11 + use kernel::prelude::*; 12 + use kernel::sync::Mutex; 13 + use kernel::{c_str, new_mutex, str::CString}; 14 + 15 + module! { 16 + type: RustScopedDebugFs, 17 + name: "rust_debugfs_scoped", 18 + authors: ["Matthew Maurer"], 19 + description: "Rust Scoped DebugFS usage sample", 20 + license: "GPL", 21 + } 22 + 23 + fn remove_file_write( 24 + mod_data: &ModuleData, 25 + reader: &mut kernel::uaccess::UserSliceReader, 26 + ) -> Result { 27 + let mut buf = [0u8; 128]; 28 + if reader.len() >= buf.len() { 29 + return Err(EINVAL); 30 + } 31 + let n = reader.len(); 32 + reader.read_slice(&mut buf[..n])?; 33 + 34 + let s = core::str::from_utf8(&buf[..n]).map_err(|_| EINVAL)?.trim(); 35 + let nul_idx = s.len(); 36 + buf[nul_idx] = 0; 37 + let to_remove = CStr::from_bytes_with_nul(&buf[..nul_idx + 1]).map_err(|_| EINVAL)?; 38 + mod_data 39 + .devices 40 + .lock() 41 + .retain(|device| device.name.as_bytes() != to_remove.as_bytes()); 42 + Ok(()) 43 + } 44 + 45 + fn create_file_write( 46 + mod_data: &ModuleData, 47 + reader: &mut kernel::uaccess::UserSliceReader, 48 + ) -> Result { 49 + let mut buf = [0u8; 128]; 50 + if reader.len() > buf.len() { 51 + return Err(EINVAL); 52 + } 53 + let n = reader.len(); 54 + reader.read_slice(&mut buf[..n])?; 55 + 56 + let mut nums = KVec::new(); 57 + 58 + let s = core::str::from_utf8(&buf[..n]).map_err(|_| EINVAL)?.trim(); 59 + let mut items = s.split_whitespace(); 60 + let name_str = items.next().ok_or(EINVAL)?; 61 + let name = CString::try_from_fmt(fmt!("{name_str}"))?; 62 + let file_name = CString::try_from_fmt(fmt!("{name_str}"))?; 63 + for sub in items { 64 + nums.push( 65 + 
AtomicUsize::new(sub.parse().map_err(|_| EINVAL)?), 66 + GFP_KERNEL, 67 + )?; 68 + } 69 + 70 + let scope = KBox::pin_init( 71 + mod_data 72 + .device_dir 73 + .scope(DeviceData { name, nums }, &file_name, |dev_data, dir| { 74 + for (idx, val) in dev_data.nums.iter().enumerate() { 75 + let Ok(name) = CString::try_from_fmt(fmt!("{idx}")) else { 76 + return; 77 + }; 78 + dir.read_write_file(&name, val); 79 + } 80 + }), 81 + GFP_KERNEL, 82 + )?; 83 + (*mod_data.devices.lock()).push(scope, GFP_KERNEL)?; 84 + 85 + Ok(()) 86 + } 87 + 88 + struct RustScopedDebugFs { 89 + _data: Pin<KBox<Scope<ModuleData>>>, 90 + } 91 + 92 + #[pin_data] 93 + struct ModuleData { 94 + device_dir: Dir, 95 + #[pin] 96 + devices: Mutex<KVec<Pin<KBox<Scope<DeviceData>>>>>, 97 + } 98 + 99 + impl ModuleData { 100 + fn init(device_dir: Dir) -> impl PinInit<Self> { 101 + pin_init! { 102 + Self { 103 + device_dir: device_dir, 104 + devices <- new_mutex!(KVec::new()) 105 + } 106 + } 107 + } 108 + } 109 + 110 + struct DeviceData { 111 + name: CString, 112 + nums: KVec<AtomicUsize>, 113 + } 114 + 115 + fn init_control(base_dir: &Dir, dyn_dirs: Dir) -> impl PinInit<Scope<ModuleData>> + '_ { 116 + base_dir.scope( 117 + ModuleData::init(dyn_dirs), 118 + c_str!("control"), 119 + |data, dir| { 120 + dir.write_only_callback_file(c_str!("create"), data, &create_file_write); 121 + dir.write_only_callback_file(c_str!("remove"), data, &remove_file_write); 122 + }, 123 + ) 124 + } 125 + 126 + impl kernel::Module for RustScopedDebugFs { 127 + fn init(_module: &'static kernel::ThisModule) -> Result<Self> { 128 + let base_dir = Dir::new(c_str!("rust_scoped_debugfs")); 129 + let dyn_dirs = base_dir.subdir(c_str!("dynamic")); 130 + Ok(Self { 131 + _data: KBox::pin_init(init_control(&base_dir, dyn_dirs), GFP_KERNEL)?, 132 + }) 133 + } 134 + }
+1 -5
samples/rust/rust_dma.rs
··· 5 5 //! To make this driver probe, QEMU must be run with `-device pci-testdev`. 6 6 7 7 use kernel::{ 8 - bindings, 9 8 device::Core, 10 9 dma::{CoherentAllocation, Device, DmaMask}, 11 10 pci, ··· 44 45 PCI_TABLE, 45 46 MODULE_PCI_TABLE, 46 47 <DmaSampleDriver as pci::Driver>::IdInfo, 47 - [( 48 - pci::DeviceId::from_id(bindings::PCI_VENDOR_ID_REDHAT, 0x5), 49 - () 50 - )] 48 + [(pci::DeviceId::from_id(pci::Vendor::REDHAT, 0x5), ())] 51 49 ); 52 50 53 51 impl pci::Driver for DmaSampleDriver {
+5 -7
samples/rust/rust_driver_auxiliary.rs
··· 5 5 //! To make this driver probe, QEMU must be run with `-device pci-testdev`. 6 6 7 7 use kernel::{ 8 - auxiliary, bindings, c_str, device::Core, driver, error::Error, pci, prelude::*, InPlaceModule, 8 + auxiliary, c_str, device::Core, driver, error::Error, pci, prelude::*, InPlaceModule, 9 9 }; 10 10 11 11 use pin_init::PinInit; ··· 50 50 PCI_TABLE, 51 51 MODULE_PCI_TABLE, 52 52 <ParentDriver as pci::Driver>::IdInfo, 53 - [( 54 - pci::DeviceId::from_id(bindings::PCI_VENDOR_ID_REDHAT, 0x5), 55 - () 56 - )] 53 + [(pci::DeviceId::from_id(pci::Vendor::REDHAT, 0x5), ())] 57 54 ); 58 55 59 56 impl pci::Driver for ParentDriver { ··· 78 81 let parent = adev.parent().ok_or(EINVAL)?; 79 82 let pdev: &pci::Device = parent.try_into()?; 80 83 84 + let vendor = pdev.vendor_id(); 81 85 dev_info!( 82 86 adev.as_ref(), 83 - "Connect auxiliary {} with parent: VendorID={:#x}, DeviceID={:#x}\n", 87 + "Connect auxiliary {} with parent: VendorID={}, DeviceID={:#x}\n", 84 88 adev.id(), 85 - pdev.vendor_id(), 89 + vendor, 86 90 pdev.device_id() 87 91 ); 88 92
+5 -4
samples/rust/rust_driver_pci.rs
··· 4 4 //! 5 5 //! To make this driver probe, QEMU must be run with `-device pci-testdev`. 6 6 7 - use kernel::{bindings, c_str, device::Core, devres::Devres, pci, prelude::*, types::ARef}; 7 + use kernel::{c_str, device::Core, devres::Devres, pci, prelude::*, sync::aref::ARef}; 8 8 9 9 struct Regs; 10 10 ··· 38 38 MODULE_PCI_TABLE, 39 39 <SampleDriver as pci::Driver>::IdInfo, 40 40 [( 41 - pci::DeviceId::from_id(bindings::PCI_VENDOR_ID_REDHAT, 0x5), 41 + pci::DeviceId::from_id(pci::Vendor::REDHAT, 0x5), 42 42 TestIndex::NO_EVENTFD 43 43 )] 44 44 ); ··· 66 66 const ID_TABLE: pci::IdTable<Self::IdInfo> = &PCI_TABLE; 67 67 68 68 fn probe(pdev: &pci::Device<Core>, info: &Self::IdInfo) -> Result<Pin<KBox<Self>>> { 69 + let vendor = pdev.vendor_id(); 69 70 dev_dbg!( 70 71 pdev.as_ref(), 71 - "Probe Rust PCI driver sample (PCI ID: 0x{:x}, 0x{:x}).\n", 72 - pdev.vendor_id(), 72 + "Probe Rust PCI driver sample (PCI ID: {}, 0x{:x}).\n", 73 + vendor, 73 74 pdev.device_id() 74 75 ); 75 76
+1 -1
samples/rust/rust_driver_platform.rs
··· 72 72 of, platform, 73 73 prelude::*, 74 74 str::CString, 75 - types::ARef, 75 + sync::aref::ARef, 76 76 }; 77 77 78 78 struct SampleDriver {