/*
 * Source: Linux kernel mirror (for testing)
 * git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
 * Tags: kernel, os, linux
 */
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * devtmpfs - kernel-maintained tmpfs-based /dev
4 *
5 * Copyright (C) 2009, Kay Sievers <kay.sievers@vrfy.org>
6 *
7 * During bootup, before any driver core device is registered,
8 * devtmpfs, a tmpfs-based filesystem is created. Every driver-core
9 * device which requests a device node, will add a node in this
10 * filesystem.
11 * By default, all devices are named after the name of the device,
12 * owned by root and have a default mode of 0600. Subsystems can
13 * overwrite the default setting if needed.
14 */
15
16#define pr_fmt(fmt) "devtmpfs: " fmt
17
18#include <linux/kernel.h>
19#include <linux/syscalls.h>
20#include <linux/mount.h>
21#include <linux/device.h>
22#include <linux/blkdev.h>
23#include <linux/namei.h>
24#include <linux/fs.h>
25#include <linux/shmem_fs.h>
26#include <linux/ramfs.h>
27#include <linux/sched.h>
28#include <linux/slab.h>
29#include <linux/kthread.h>
30#include <linux/init_syscalls.h>
31#include <uapi/linux/mount.h>
32#include "base.h"
33
34#ifdef CONFIG_DEVTMPFS_SAFE
35#define DEVTMPFS_MFLAGS (MS_SILENT | MS_NOEXEC | MS_NOSUID)
36#else
37#define DEVTMPFS_MFLAGS (MS_SILENT)
38#endif
39
/* kdevtmpfs kthread; NULL until devtmpfs_init() has started it */
static struct task_struct *thread;

/* auto-mount devtmpfs on /dev at boot; "devtmpfs.mount=" overrides */
static int __initdata mount_dev = IS_ENABLED(CONFIG_DEVTMPFS_MOUNT);

/* protects the singly-linked list of pending requests below */
static DEFINE_SPINLOCK(req_lock);

/*
 * One node create/delete request, queued by the driver core and
 * serviced by the kdevtmpfs thread.  The struct lives on the
 * submitter's stack (see devtmpfs_create_node()); the submitter
 * sleeps on @done until kdevtmpfs has filled in @err.
 */
static struct req {
	struct req *next;		/* next pending request (LIFO list) */
	struct completion done;		/* signalled when request is handled */
	int err;			/* result of handling the request */
	const char *name;		/* node path relative to devtmpfs root */
	umode_t mode;			/* 0 => delete */
	kuid_t uid;
	kgid_t gid;
	struct device *dev;
} *requests;
56
/*
 * Parse the "devtmpfs.mount=" kernel command line option into
 * mount_dev.  Returns 1 (option handled) on a valid integer, 0
 * otherwise.
 */
static int __init mount_param(char *str)
{
	return kstrtoint(str, 0, &mount_dev) == 0;
}
__setup("devtmpfs.mount=", mount_param);
62
/* the single internal devtmpfs instance; all mounts share its superblock */
static struct vfsmount *mnt;

/*
 * Backing filesystem type used only for the internal instance created
 * in devtmpfs_init(): tmpfs when available, ramfs otherwise.
 */
static struct file_system_type internal_fs_type = {
	.name = "devtmpfs",
#ifdef CONFIG_TMPFS
	.init_fs_context = shmem_init_fs_context,
#else
	.init_fs_context = ramfs_init_fs_context,
#endif
	.kill_sb = kill_anon_super,
};
74
/*
 * Simply take a ref on the existing mount: every devtmpfs mount shares
 * the internal superblock created at init time.
 */
static int devtmpfs_get_tree(struct fs_context *fc)
{
	struct super_block *sb = mnt->mnt_sb;

	atomic_inc(&sb->s_active);
	/*
	 * NOTE(review): s_umount is taken here and presumably released
	 * later by the mount machinery, matching sget()-style superblock
	 * lookup behaviour — confirm against vfs_get_tree().
	 */
	down_write(&sb->s_umount);
	fc->root = dget(sb->s_root);
	return 0;
}
85
/*
 * Ops are filled in during init depending on underlying shmem or ramfs
 * type (see devtmpfs_configure_context()), with get_tree always
 * replaced by devtmpfs_get_tree().
 */
static struct fs_context_operations devtmpfs_context_ops = {};
88
/*
 * Call the underlying (shmem/ramfs) initialization, then substitute
 * our own ops so that get_tree() reuses the internal superblock.
 */
static int devtmpfs_init_fs_context(struct fs_context *fc)
{
	int ret;
#ifdef CONFIG_TMPFS
	ret = shmem_init_fs_context(fc);
#else
	ret = ramfs_init_fs_context(fc);
#endif
	if (ret < 0)
		return ret;

	fc->ops = &devtmpfs_context_ops;

	return 0;
}
105
/* the "devtmpfs" filesystem type seen by userspace mount(2) */
static struct file_system_type dev_fs_type = {
	.name = "devtmpfs",
	.init_fs_context = devtmpfs_init_fs_context,
};
110
/*
 * Queue @req for the kdevtmpfs thread and sleep until it has been
 * handled.  @tmp is the scratch buffer from device_get_devnode() (may
 * be NULL); it is freed here once req->name is no longer referenced.
 * Returns the error reported by the worker.
 */
static int devtmpfs_submit_req(struct req *req, const char *tmp)
{
	init_completion(&req->done);

	/* push onto the LIFO request list */
	spin_lock(&req_lock);
	req->next = requests;
	requests = req;
	spin_unlock(&req_lock);

	wake_up_process(thread);
	wait_for_completion(&req->done);

	kfree(tmp);

	return req->err;
}
127
/*
 * Request creation of a device node for @dev.  Called by the driver
 * core; returns 0 immediately when devtmpfs is not running.  Blocks
 * until the kdevtmpfs thread has processed the request.
 */
int devtmpfs_create_node(struct device *dev)
{
	const char *tmp = NULL;
	struct req req;

	if (!thread)
		return 0;

	req.mode = 0;
	req.uid = GLOBAL_ROOT_UID;
	req.gid = GLOBAL_ROOT_GID;
	req.name = device_get_devnode(dev, &req.mode, &req.uid, &req.gid, &tmp);
	if (!req.name)
		return -ENOMEM;

	/* default to 0600 unless the subsystem requested another mode */
	if (req.mode == 0)
		req.mode = 0600;
	/* add the file type bit matching the device class */
	if (is_blockdev(dev))
		req.mode |= S_IFBLK;
	else
		req.mode |= S_IFCHR;

	req.dev = dev;

	return devtmpfs_submit_req(&req, tmp);
}
154
/*
 * Request removal of the device node for @dev.  Returns 0 immediately
 * when devtmpfs is not running; otherwise blocks until the kdevtmpfs
 * thread has processed the request.
 */
int devtmpfs_delete_node(struct device *dev)
{
	const char *tmp = NULL;
	struct req req;

	if (!thread)
		return 0;

	req.name = device_get_devnode(dev, NULL, NULL, NULL, &tmp);
	if (!req.name)
		return -ENOMEM;

	/* mode == 0 marks this request as a delete */
	req.mode = 0;
	req.dev = dev;

	return devtmpfs_submit_req(&req, tmp);
}
172
/*
 * Create directory @name (with mode @mode) relative to the current
 * working directory, which for the kdevtmpfs thread is the devtmpfs
 * root (see devtmpfs_setup()).  The new inode's i_private is tagged
 * with &thread so removal code can tell our directories from
 * user-created ones.
 */
static int dev_mkdir(const char *name, umode_t mode)
{
	struct dentry *dentry;
	struct path path;

	dentry = start_creating_path(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	dentry = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode, NULL);
	if (!IS_ERR(dentry))
		/* mark as kernel-created inode */
		d_inode(dentry)->i_private = &thread;
	end_creating_path(&path, dentry);
	return PTR_ERR_OR_ZERO(dentry);
}
189
190static int create_path(const char *nodepath)
191{
192 char *path;
193 char *s;
194 int err = 0;
195
196 /* parent directories do not exist, create them */
197 path = kstrdup(nodepath, GFP_KERNEL);
198 if (!path)
199 return -ENOMEM;
200
201 s = path;
202 for (;;) {
203 s = strchr(s, '/');
204 if (!s)
205 break;
206 s[0] = '\0';
207 err = dev_mkdir(path, 0755);
208 if (err && err != -EEXIST)
209 break;
210 s[0] = '/';
211 s++;
212 }
213 kfree(path);
214 return err;
215}
216
/*
 * Create the device node @nodename (creating any missing parent
 * directories), set the requested mode and ownership on it, and tag
 * the inode as kernel-created so handle_remove() will touch it later.
 */
static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
			 kgid_t gid, struct device *dev)
{
	struct dentry *dentry;
	struct path path;
	int err;

	dentry = start_creating_path(AT_FDCWD, nodename, &path, 0);
	if (dentry == ERR_PTR(-ENOENT)) {
		/* parent directory missing: create the path and retry once */
		create_path(nodename);
		dentry = start_creating_path(AT_FDCWD, nodename, &path, 0);
	}
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	err = vfs_mknod(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode,
			dev->devt, NULL);
	if (!err) {
		struct iattr newattrs;

		/* apply the exact requested mode and ownership */
		newattrs.ia_mode = mode;
		newattrs.ia_uid = uid;
		newattrs.ia_gid = gid;
		newattrs.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID;
		inode_lock(d_inode(dentry));
		notify_change(&nop_mnt_idmap, dentry, &newattrs, NULL);
		inode_unlock(d_inode(dentry));

		/* mark as kernel-created inode */
		d_inode(dentry)->i_private = &thread;
	}
	end_creating_path(&path, dentry);
	return err;
}
251
/*
 * Remove directory @name, but only if devtmpfs created it (the
 * i_private tag set in dev_mkdir() matches); refuse user-created
 * directories with -EPERM.
 */
static int dev_rmdir(const char *name)
{
	struct path parent;
	struct dentry *dentry;
	int err;

	dentry = start_removing_path(name, &parent);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	if (d_inode(dentry)->i_private == &thread)
		err = vfs_rmdir(&nop_mnt_idmap, d_inode(parent.dentry),
				dentry, NULL);
	else
		err = -EPERM;

	end_removing_path(&parent, dentry);
	return err;
}
270
271static int delete_path(const char *nodepath)
272{
273 char *path;
274 int err = 0;
275
276 path = kstrdup(nodepath, GFP_KERNEL);
277 if (!path)
278 return -ENOMEM;
279
280 for (;;) {
281 char *base;
282
283 base = strrchr(path, '/');
284 if (!base)
285 break;
286 base[0] = '\0';
287 err = dev_rmdir(path);
288 if (err)
289 break;
290 }
291
292 kfree(path);
293 return err;
294}
295
296static int dev_mynode(struct device *dev, struct inode *inode)
297{
298 /* did we create it */
299 if (inode->i_private != &thread)
300 return 0;
301
302 /* does the dev_t match */
303 if (is_blockdev(dev)) {
304 if (!S_ISBLK(inode->i_mode))
305 return 0;
306 } else {
307 if (!S_ISCHR(inode->i_mode))
308 return 0;
309 }
310 if (inode->i_rdev != dev->devt)
311 return 0;
312
313 /* ours */
314 return 1;
315}
316
/*
 * Remove the node @nodename if it is still the one devtmpfs created
 * for @dev, then prune any parent directories that became empty.
 */
static int handle_remove(const char *nodename, struct device *dev)
{
	struct path parent;
	struct dentry *dentry;
	struct inode *inode;
	int deleted = 0;
	int err = 0;

	dentry = start_removing_path(nodename, &parent);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	inode = d_inode(dentry);
	if (dev_mynode(dev, inode)) {
		struct iattr newattrs;
		/*
		 * before unlinking this node, reset permissions
		 * of possible references like hardlinks
		 */
		newattrs.ia_uid = GLOBAL_ROOT_UID;
		newattrs.ia_gid = GLOBAL_ROOT_GID;
		newattrs.ia_mode = inode->i_mode & ~0777;
		newattrs.ia_valid =
			ATTR_UID|ATTR_GID|ATTR_MODE;
		inode_lock(d_inode(dentry));
		notify_change(&nop_mnt_idmap, dentry, &newattrs, NULL);
		inode_unlock(d_inode(dentry));
		err = vfs_unlink(&nop_mnt_idmap, d_inode(parent.dentry),
				 dentry, NULL);
		/* -ENOENT means somebody else already removed it: fine */
		if (!err || err == -ENOENT)
			deleted = 1;
	}
	end_removing_path(&parent, dentry);

	/* drop the now-empty parent directories we created, if any */
	if (deleted && strchr(nodename, '/'))
		delete_path(nodename);
	return err;
}
355
356/*
357 * If configured, or requested by the commandline, devtmpfs will be
358 * auto-mounted after the kernel mounted the root filesystem.
359 */
360int __init devtmpfs_mount(void)
361{
362 int err;
363
364 if (!mount_dev)
365 return 0;
366
367 if (!thread)
368 return 0;
369
370 err = init_mount("devtmpfs", "dev", "devtmpfs", DEVTMPFS_MFLAGS, NULL);
371 if (err)
372 pr_info("error mounting %d\n", err);
373 else
374 pr_info("mounted\n");
375 return err;
376}
377
/* signalled by devtmpfsd() once devtmpfs_setup() has run (pass or fail) */
static __initdata DECLARE_COMPLETION(setup_done);
379
380static int handle(const char *name, umode_t mode, kuid_t uid, kgid_t gid,
381 struct device *dev)
382{
383 if (mode)
384 return handle_create(name, mode, uid, gid, dev);
385 else
386 return handle_remove(name, dev);
387}
388
/*
 * kdevtmpfs main loop: drain the pending request list, handling each
 * entry and waking its submitter, then sleep until new work arrives.
 */
static void __noreturn devtmpfs_work_loop(void)
{
	while (1) {
		spin_lock(&req_lock);
		while (requests) {
			struct req *req = requests;
			/* detach the whole list; process it without the lock */
			requests = NULL;
			spin_unlock(&req_lock);
			while (req) {
				struct req *next = req->next;
				req->err = handle(req->name, req->mode,
						  req->uid, req->gid, req->dev);
				/* the submitter owns req; don't touch it after this */
				complete(&req->done);
				req = next;
			}
			spin_lock(&req_lock);
		}
		/*
		 * Set the task state before dropping the lock, so a
		 * submitter that appends and calls wake_up_process()
		 * in between cannot have its wakeup lost.
		 */
		__set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock(&req_lock);
		schedule();
	}
}
411
/*
 * Runs in the kdevtmpfs thread: give it a private mount namespace with
 * devtmpfs overmounted on its root, so all node paths handled by the
 * thread are relative to the devtmpfs root.  The result is reported
 * back to devtmpfs_init() through *p.
 */
static noinline int __init devtmpfs_setup(void *p)
{
	int err;

	err = ksys_unshare(CLONE_NEWNS);
	if (err)
		goto out;
	err = init_mount("devtmpfs", "/", "devtmpfs", DEVTMPFS_MFLAGS, NULL);
	if (err)
		goto out;
	init_chdir("/.."); /* will traverse into overmounted root */
	init_chroot(".");
out:
	*(int *)p = err;
	return err;
}
428
/*
 * The __ref is because devtmpfs_setup needs to be __init for the routines it
 * calls. That call is done while devtmpfs_init, which is marked __init,
 * synchronously waits for it to complete.
 */
static int __ref devtmpfsd(void *p)
{
	int err = devtmpfs_setup(p);

	/* let devtmpfs_init() proceed; it reads the error through *p */
	complete(&setup_done);
	if (err)
		return err;
	devtmpfs_work_loop();
	return 0;
}
444
/*
 * Get the underlying (shmem/ramfs) context ops to build ours: copy
 * every callback from a reconfigure context on the internal mount,
 * but substitute get_tree so each mount shares that superblock.
 */
static int devtmpfs_configure_context(void)
{
	struct fs_context *fc;

	fc = fs_context_for_reconfigure(mnt->mnt_root, mnt->mnt_sb->s_flags,
					MS_RMT_MASK);
	if (IS_ERR(fc))
		return PTR_ERR(fc);

	/* Set up devtmpfs_context_ops based on underlying type */
	devtmpfs_context_ops.free = fc->ops->free;
	devtmpfs_context_ops.dup = fc->ops->dup;
	devtmpfs_context_ops.parse_param = fc->ops->parse_param;
	devtmpfs_context_ops.parse_monolithic = fc->ops->parse_monolithic;
	devtmpfs_context_ops.get_tree = &devtmpfs_get_tree;
	devtmpfs_context_ops.reconfigure = fc->ops->reconfigure;

	put_fs_context(fc);

	return 0;
}
469
470/*
471 * Create devtmpfs instance, driver-core devices will add their device
472 * nodes here.
473 */
474int __init devtmpfs_init(void)
475{
476 char opts[] = "mode=0755";
477 int err;
478
479 mnt = vfs_kern_mount(&internal_fs_type, 0, "devtmpfs", opts);
480 if (IS_ERR(mnt)) {
481 pr_err("unable to create devtmpfs %ld\n", PTR_ERR(mnt));
482 return PTR_ERR(mnt);
483 }
484
485 err = devtmpfs_configure_context();
486 if (err) {
487 pr_err("unable to configure devtmpfs type %d\n", err);
488 return err;
489 }
490
491 err = register_filesystem(&dev_fs_type);
492 if (err) {
493 pr_err("unable to register devtmpfs type %d\n", err);
494 return err;
495 }
496
497 thread = kthread_run(devtmpfsd, &err, "kdevtmpfs");
498 if (!IS_ERR(thread)) {
499 wait_for_completion(&setup_done);
500 } else {
501 err = PTR_ERR(thread);
502 thread = NULL;
503 }
504
505 if (err) {
506 pr_err("unable to create devtmpfs %d\n", err);
507 unregister_filesystem(&dev_fs_type);
508 thread = NULL;
509 return err;
510 }
511
512 pr_info("initialized\n");
513 return 0;
514}