Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * LSM initialization functions
4 */
5
6#define pr_fmt(fmt) "LSM: " fmt
7
8#include <linux/init.h>
9#include <linux/lsm_hooks.h>
10
11#include "lsm.h"
12
/*
 * LSM enabled constants.
 *
 * Shared storage for the enabled/disabled state of LSMs that do not supply
 * their own "enabled" variable; lsm_enabled_set() points lsm->enabled at one
 * of these when the LSM has no variable of its own.
 */
static __initdata int lsm_enabled_true = 1;
static __initdata int lsm_enabled_false = 0;

/* Pointers to LSM sections defined in include/asm-generic/vmlinux.lds.h */
extern struct lsm_info __start_lsm_info[], __end_lsm_info[];
extern struct lsm_info __start_early_lsm_info[], __end_early_lsm_info[];

/* Number of "early" LSMs (skipped during the second init pass). */
static __initdata unsigned int lsm_count_early;

/* Build and boot-time LSM ordering. */
static __initconst const char *const lsm_order_builtin = CONFIG_LSM;
static __initdata const char *lsm_order_cmdline;	/* "lsm=" parameter */
static __initdata const char *lsm_order_legacy;		/* "security=" parameter */

/* Ordered list of LSMs to initialize; NULL terminated (hence the +1). */
static __initdata struct lsm_info *lsm_order[MAX_LSM_COUNT + 1];
/* The LSM that claimed the LSM_FLAG_EXCLUSIVE slot, if any. */
static __initdata struct lsm_info *lsm_exclusive;

/* Iterate over the ordered, enabled LSMs; stops at the NULL terminator. */
#define lsm_order_for_each(iter) \
	for ((iter) = lsm_order; *(iter); (iter)++)
/* Iterate over every built-in LSM definition, in link order. */
#define lsm_for_each_raw(iter) \
	for ((iter) = __start_lsm_info; \
	     (iter) < __end_lsm_info; (iter)++)
/* Iterate over every built-in "early" LSM definition, in link order. */
#define lsm_early_for_each_raw(iter) \
	for ((iter) = __start_early_lsm_info; \
	     (iter) < __end_early_lsm_info; (iter)++)
41
/*
 * lsm_initcall - Run a given initcall level for every ordered LSM
 * @level: initcall level name (token-pasted into ->initcall_<level>)
 *
 * Statement-expression macro: walks lsm_order[] and invokes each LSM's
 * initcall for @level, skipping LSMs that did not register one.  All LSM
 * initcalls are attempted even after a failure; the expression evaluates
 * to 0 on success or the first non-zero return value seen.
 */
#define lsm_initcall(level) \
	({ \
		int _r, _rc = 0; \
		struct lsm_info **_lp, *_l; \
		lsm_order_for_each(_lp) { \
			_l = *_lp; \
			if (!_l->initcall_##level) \
				continue; \
			lsm_pr_dbg("running %s %s initcall", \
				   _l->id->name, #level); \
			_r = _l->initcall_##level(); \
			if (_r) { \
				pr_warn("failed LSM %s %s initcall with errno %d\n", \
					_l->id->name, #level, _r); \
				if (!_rc) \
					_rc = _r; \
			} \
		} \
		_rc; \
	})
62
63/**
64 * lsm_choose_security - Legacy "major" LSM selection
65 * @str: kernel command line parameter
66 */
/**
 * lsm_choose_security - Legacy "major" LSM selection
 * @str: kernel command line parameter
 *
 * Records the "security=" parameter value; it is consumed later by
 * lsm_order_parse().
 *
 * Return: Always 1 (parameter handled).
 */
static int __init lsm_choose_security(char *str)
{
	lsm_order_legacy = str;
	return 1;
}
__setup("security=", lsm_choose_security);
73
74/**
75 * lsm_choose_lsm - Modern LSM selection
76 * @str: kernel command line parameter
77 */
/**
 * lsm_choose_lsm - Modern LSM selection
 * @str: kernel command line parameter
 *
 * Records the "lsm=" parameter value; it is consumed later by
 * security_init(), where it takes precedence over "security=".
 *
 * Return: Always 1 (parameter handled).
 */
static int __init lsm_choose_lsm(char *str)
{
	lsm_order_cmdline = str;
	return 1;
}
__setup("lsm=", lsm_choose_lsm);
84
85/**
86 * lsm_debug_enable - Enable LSM framework debugging
87 * @str: kernel command line parameter
88 *
89 * Currently we only provide debug info during LSM initialization, but we may
90 * want to expand this in the future.
91 */
/**
 * lsm_debug_enable - Enable LSM framework debugging
 * @str: kernel command line parameter
 *
 * Currently we only provide debug info during LSM initialization, but we may
 * want to expand this in the future.
 *
 * Return: Always 1 (parameter handled).
 */
static int __init lsm_debug_enable(char *str)
{
	lsm_debug = true;
	return 1;
}
__setup("lsm.debug", lsm_debug_enable);
98
99/**
100 * lsm_enabled_set - Mark a LSM as enabled
101 * @lsm: LSM definition
102 * @enabled: enabled flag
103 */
104static void __init lsm_enabled_set(struct lsm_info *lsm, bool enabled)
105{
106 /*
107 * When an LSM hasn't configured an enable variable, we can use
108 * a hard-coded location for storing the default enabled state.
109 */
110 if (!lsm->enabled ||
111 lsm->enabled == &lsm_enabled_true ||
112 lsm->enabled == &lsm_enabled_false) {
113 lsm->enabled = enabled ? &lsm_enabled_true : &lsm_enabled_false;
114 } else {
115 *lsm->enabled = enabled;
116 }
117}
118
119/**
120 * lsm_is_enabled - Determine if a LSM is enabled
121 * @lsm: LSM definition
122 */
123static inline bool lsm_is_enabled(struct lsm_info *lsm)
124{
125 return (lsm->enabled ? *lsm->enabled : false);
126}
127
128/**
129 * lsm_order_exists - Determine if a LSM exists in the ordered list
130 * @lsm: LSM definition
131 */
132static bool __init lsm_order_exists(struct lsm_info *lsm)
133{
134 struct lsm_info **check;
135
136 lsm_order_for_each(check) {
137 if (*check == lsm)
138 return true;
139 }
140
141 return false;
142}
143
144/**
145 * lsm_order_append - Append a LSM to the ordered list
146 * @lsm: LSM definition
147 * @src: source of the addition
148 *
149 * Append @lsm to the enabled LSM array after ensuring that it hasn't been
150 * explicitly disabled, is a duplicate entry, or would run afoul of the
151 * LSM_FLAG_EXCLUSIVE logic.
152 */
/**
 * lsm_order_append - Append a LSM to the ordered list
 * @lsm: LSM definition
 * @src: source of the addition
 *
 * Append @lsm to the enabled LSM array after ensuring that it hasn't been
 * explicitly disabled, is a duplicate entry, or would run afoul of the
 * LSM_FLAG_EXCLUSIVE logic.  On success @lsm is marked enabled and both
 * lsm_order[] and lsm_idlist[] gain an entry; on any rejection other than
 * a duplicate, @lsm is explicitly marked disabled.
 */
static void __init lsm_order_append(struct lsm_info *lsm, const char *src)
{
	/* Ignore duplicate selections. */
	if (lsm_order_exists(lsm))
		return;

	/* Skip explicitly disabled LSMs. */
	if (lsm->enabled && !lsm_is_enabled(lsm)) {
		lsm_pr_dbg("skip previously disabled LSM %s:%s\n",
			   src, lsm->id->name);
		return;
	}

	/* Reject anything beyond the fixed-size order array. */
	if (lsm_active_cnt == MAX_LSM_COUNT) {
		pr_warn("exceeded maximum LSM count on %s:%s\n",
			src, lsm->id->name);
		lsm_enabled_set(lsm, false);
		return;
	}

	/* Only one LSM_FLAG_EXCLUSIVE LSM may be enabled; first one wins. */
	if (lsm->flags & LSM_FLAG_EXCLUSIVE) {
		if (lsm_exclusive) {
			lsm_pr_dbg("skip exclusive LSM conflict %s:%s\n",
				   src, lsm->id->name);
			lsm_enabled_set(lsm, false);
			return;
		} else {
			lsm_pr_dbg("select exclusive LSM %s:%s\n",
				   src, lsm->id->name);
			lsm_exclusive = lsm;
		}
	}

	/* Accept: enable the LSM and record it in the ordered lists. */
	lsm_enabled_set(lsm, true);
	lsm_order[lsm_active_cnt] = lsm;
	lsm_idlist[lsm_active_cnt++] = lsm->id;

	lsm_pr_dbg("enabling LSM %s:%s\n", src, lsm->id->name);
}
192
193/**
194 * lsm_order_parse - Parse the comma delimited LSM list
195 * @list: LSM list
196 * @src: source of the list
197 */
198static void __init lsm_order_parse(const char *list, const char *src)
199{
200 struct lsm_info *lsm;
201 char *sep, *name, *next;
202
203 /* Handle any Legacy LSM exclusions if one was specified. */
204 if (lsm_order_legacy) {
205 /*
206 * To match the original "security=" behavior, this explicitly
207 * does NOT fallback to another Legacy Major if the selected
208 * one was separately disabled: disable all non-matching
209 * Legacy Major LSMs.
210 */
211 lsm_for_each_raw(lsm) {
212 if ((lsm->flags & LSM_FLAG_LEGACY_MAJOR) &&
213 strcmp(lsm->id->name, lsm_order_legacy)) {
214 lsm_enabled_set(lsm, false);
215 lsm_pr_dbg("skip legacy LSM conflict %s:%s\n",
216 src, lsm->id->name);
217 }
218 }
219 }
220
221 /* LSM_ORDER_FIRST */
222 lsm_for_each_raw(lsm) {
223 if (lsm->order == LSM_ORDER_FIRST)
224 lsm_order_append(lsm, "first");
225 }
226
227 /* Normal or "mutable" LSMs */
228 sep = kstrdup(list, GFP_KERNEL);
229 next = sep;
230 /* Walk the list, looking for matching LSMs. */
231 while ((name = strsep(&next, ",")) != NULL) {
232 lsm_for_each_raw(lsm) {
233 if (!strcmp(lsm->id->name, name) &&
234 lsm->order == LSM_ORDER_MUTABLE)
235 lsm_order_append(lsm, src);
236 }
237 }
238 kfree(sep);
239
240 /* Legacy LSM if specified. */
241 if (lsm_order_legacy) {
242 lsm_for_each_raw(lsm) {
243 if (!strcmp(lsm->id->name, lsm_order_legacy))
244 lsm_order_append(lsm, src);
245 }
246 }
247
248 /* LSM_ORDER_LAST */
249 lsm_for_each_raw(lsm) {
250 if (lsm->order == LSM_ORDER_LAST)
251 lsm_order_append(lsm, "last");
252 }
253
254 /* Disable all LSMs not previously enabled. */
255 lsm_for_each_raw(lsm) {
256 if (lsm_order_exists(lsm))
257 continue;
258 lsm_enabled_set(lsm, false);
259 lsm_pr_dbg("skip disabled LSM %s:%s\n", src, lsm->id->name);
260 }
261}
262
263/**
264 * lsm_blob_size_update - Update the LSM blob size and offset information
265 * @sz_req: the requested additional blob size
266 * @sz_cur: the existing blob size
267 */
/**
 * lsm_blob_size_update - Update the LSM blob size and offset information
 * @sz_req: the requested additional blob size
 * @sz_cur: the existing blob size
 *
 * Reserve @sz_req bytes in the blob tracked by @sz_cur.  On return @sz_cur
 * holds the new total blob size and @sz_req has been REWRITTEN to hold the
 * pointer-aligned offset at which the LSM's data begins.  A zero request
 * leaves both values untouched.
 */
static void __init lsm_blob_size_update(unsigned int *sz_req,
					unsigned int *sz_cur)
{
	unsigned int offset;

	if (*sz_req == 0)
		return;

	/* Align each LSM's region so stored pointers are naturally aligned. */
	offset = ALIGN(*sz_cur, sizeof(void *));
	*sz_cur = offset + *sz_req;
	*sz_req = offset;
}
280
281/**
282 * lsm_prepare - Prepare the LSM framework for a new LSM
283 * @lsm: LSM definition
284 */
285static void __init lsm_prepare(struct lsm_info *lsm)
286{
287 struct lsm_blob_sizes *blobs = lsm->blobs;
288
289 if (!blobs)
290 return;
291
292 /* Register the LSM blob sizes. */
293 blobs = lsm->blobs;
294 lsm_blob_size_update(&blobs->lbs_cred, &blob_sizes.lbs_cred);
295 lsm_blob_size_update(&blobs->lbs_file, &blob_sizes.lbs_file);
296 lsm_blob_size_update(&blobs->lbs_backing_file,
297 &blob_sizes.lbs_backing_file);
298 lsm_blob_size_update(&blobs->lbs_ib, &blob_sizes.lbs_ib);
299 /* inode blob gets an rcu_head in addition to LSM blobs. */
300 if (blobs->lbs_inode && blob_sizes.lbs_inode == 0)
301 blob_sizes.lbs_inode = sizeof(struct rcu_head);
302 lsm_blob_size_update(&blobs->lbs_inode, &blob_sizes.lbs_inode);
303 lsm_blob_size_update(&blobs->lbs_ipc, &blob_sizes.lbs_ipc);
304 lsm_blob_size_update(&blobs->lbs_key, &blob_sizes.lbs_key);
305 lsm_blob_size_update(&blobs->lbs_msg_msg, &blob_sizes.lbs_msg_msg);
306 lsm_blob_size_update(&blobs->lbs_perf_event,
307 &blob_sizes.lbs_perf_event);
308 lsm_blob_size_update(&blobs->lbs_sock, &blob_sizes.lbs_sock);
309 lsm_blob_size_update(&blobs->lbs_superblock,
310 &blob_sizes.lbs_superblock);
311 lsm_blob_size_update(&blobs->lbs_task, &blob_sizes.lbs_task);
312 lsm_blob_size_update(&blobs->lbs_tun_dev, &blob_sizes.lbs_tun_dev);
313 lsm_blob_size_update(&blobs->lbs_xattr_count,
314 &blob_sizes.lbs_xattr_count);
315 lsm_blob_size_update(&blobs->lbs_bdev, &blob_sizes.lbs_bdev);
316 lsm_blob_size_update(&blobs->lbs_bpf_map, &blob_sizes.lbs_bpf_map);
317 lsm_blob_size_update(&blobs->lbs_bpf_prog, &blob_sizes.lbs_bpf_prog);
318 lsm_blob_size_update(&blobs->lbs_bpf_token, &blob_sizes.lbs_bpf_token);
319}
320
321/**
322 * lsm_init_single - Initialize a given LSM
323 * @lsm: LSM definition
324 */
325static void __init lsm_init_single(struct lsm_info *lsm)
326{
327 int ret;
328
329 if (!lsm_is_enabled(lsm))
330 return;
331
332 lsm_pr_dbg("initializing %s\n", lsm->id->name);
333 ret = lsm->init();
334 WARN(ret, "%s failed to initialize: %d\n", lsm->id->name, ret);
335}
336
337/**
338 * lsm_static_call_init - Initialize a LSM's static calls
339 * @hl: LSM hook list
340 */
/**
 * lsm_static_call_init - Initialize a LSM's static calls
 * @hl: LSM hook list
 *
 * Claim the first unused static call slot for @hl's hook: the static call
 * is retargeted at the LSM's hook function before the slot's static branch
 * is enabled, so the hook never runs with a stale target.
 *
 * Return: 0 on success, -ENOSPC if all MAX_LSM_COUNT slots are in use.
 */
static int __init lsm_static_call_init(struct security_hook_list *hl)
{
	struct lsm_static_call *scall = hl->scalls;
	int i;

	for (i = 0; i < MAX_LSM_COUNT; i++) {
		/* Update the first static call that is not used yet */
		if (!scall->hl) {
			__static_call_update(scall->key, scall->trampoline,
					     hl->hook.lsm_func_addr);
			scall->hl = hl;
			static_branch_enable(scall->active);
			return 0;
		}
		scall++;
	}

	return -ENOSPC;
}
360
361/**
362 * security_add_hooks - Add a LSM's hooks to the LSM framework's hook lists
363 * @hooks: LSM hooks to add
364 * @count: number of hooks to add
365 * @lsmid: identification information for the LSM
366 *
367 * Each LSM has to register its hooks with the LSM framework.
368 */
369void __init security_add_hooks(struct security_hook_list *hooks, int count,
370 const struct lsm_id *lsmid)
371{
372 int i;
373
374 for (i = 0; i < count; i++) {
375 hooks[i].lsmid = lsmid;
376 if (lsm_static_call_init(&hooks[i]))
377 panic("exhausted LSM callback slots with LSM %s\n",
378 lsmid->name);
379 }
380}
381
382/**
383 * early_security_init - Initialize the early LSMs
384 */
/**
 * early_security_init - Initialize the early LSMs
 *
 * Fully bring up every LSM in the "early" section: enable it, append it to
 * the ordered list, register its blob sizes, and run its init function.
 * lsm_count_early records how many LSMs were handled here so that
 * security_init() can skip them on its second pass.
 *
 * Return: Always returns 0.
 */
int __init early_security_init(void)
{
	struct lsm_info *lsm;

	/* NOTE: lsm_pr_dbg() doesn't work here as lsm_debug is not yet set */

	lsm_early_for_each_raw(lsm) {
		lsm_enabled_set(lsm, true);
		lsm_order_append(lsm, "early");
		lsm_prepare(lsm);
		lsm_init_single(lsm);
		lsm_count_early++;
	}

	return 0;
}
401
402/**
403 * security_init - Initializes the LSM framework
404 *
405 * This should be called early in the kernel initialization sequence.
406 */
/**
 * security_init - Initializes the LSM framework
 *
 * This should be called early in the kernel initialization sequence.
 * Resolves the final LSM ordering, sizes and allocates the shared security
 * blobs, attaches blobs to the current task/cred, and runs the main init
 * function of every enabled non-early LSM.
 *
 * Return: Always returns 0 (fatal setup failures panic instead).
 */
int __init security_init(void)
{
	unsigned int cnt;
	struct lsm_info **lsm;

	/* Dump the LSM configuration if "lsm.debug" was specified. */
	if (lsm_debug) {
		struct lsm_info *i;

		cnt = 0;
		lsm_pr("available LSMs: ");
		lsm_early_for_each_raw(i)
			lsm_pr_cont("%s%s(E)", (cnt++ ? "," : ""), i->id->name);
		lsm_for_each_raw(i)
			lsm_pr_cont("%s%s", (cnt++ ? "," : ""), i->id->name);
		lsm_pr_cont("\n");

		lsm_pr("built-in LSM config: %s\n", lsm_order_builtin);

		lsm_pr("legacy LSM parameter: %s\n", lsm_order_legacy);
		lsm_pr("boot LSM parameter: %s\n", lsm_order_cmdline);

		/* see the note about lsm_pr_dbg() in early_security_init() */
		lsm_early_for_each_raw(i)
			lsm_pr("enabled LSM early:%s\n", i->id->name);
	}

	/* "lsm=" takes precedence; any "security=" setting is discarded. */
	if (lsm_order_cmdline) {
		if (lsm_order_legacy)
			lsm_order_legacy = NULL;
		lsm_order_parse(lsm_order_cmdline, "cmdline");
	} else
		lsm_order_parse(lsm_order_builtin, "builtin");

	/* Accumulate the blob sizes/offsets for every ordered LSM. */
	lsm_order_for_each(lsm)
		lsm_prepare(*lsm);

	if (lsm_debug) {
		lsm_pr("blob(cred) size %d\n", blob_sizes.lbs_cred);
		lsm_pr("blob(file) size %d\n", blob_sizes.lbs_file);
		lsm_pr("blob(backing_file) size %d\n",
		       blob_sizes.lbs_backing_file);
		lsm_pr("blob(ib) size %d\n", blob_sizes.lbs_ib);
		lsm_pr("blob(inode) size %d\n", blob_sizes.lbs_inode);
		lsm_pr("blob(ipc) size %d\n", blob_sizes.lbs_ipc);
		lsm_pr("blob(key) size %d\n", blob_sizes.lbs_key);
		lsm_pr("blob(msg_msg)_size %d\n", blob_sizes.lbs_msg_msg);
		lsm_pr("blob(sock) size %d\n", blob_sizes.lbs_sock);
		lsm_pr("blob(superblock) size %d\n", blob_sizes.lbs_superblock);
		lsm_pr("blob(perf_event) size %d\n", blob_sizes.lbs_perf_event);
		lsm_pr("blob(task) size %d\n", blob_sizes.lbs_task);
		lsm_pr("blob(tun_dev) size %d\n", blob_sizes.lbs_tun_dev);
		lsm_pr("blob(xattr) count %d\n", blob_sizes.lbs_xattr_count);
		lsm_pr("blob(bdev) size %d\n", blob_sizes.lbs_bdev);
		lsm_pr("blob(bpf_map) size %d\n", blob_sizes.lbs_bpf_map);
		lsm_pr("blob(bpf_prog) size %d\n", blob_sizes.lbs_bpf_prog);
		lsm_pr("blob(bpf_token) size %d\n", blob_sizes.lbs_bpf_token);
	}

	/* Create the blob caches now that the total sizes are final. */
	if (blob_sizes.lbs_file)
		lsm_file_cache = kmem_cache_create("lsm_file_cache",
						   blob_sizes.lbs_file, 0,
						   SLAB_PANIC, NULL);
	if (blob_sizes.lbs_backing_file)
		lsm_backing_file_cache = kmem_cache_create(
						"lsm_backing_file_cache",
						blob_sizes.lbs_backing_file,
						0, SLAB_PANIC, NULL);
	if (blob_sizes.lbs_inode)
		lsm_inode_cache = kmem_cache_create("lsm_inode_cache",
						    blob_sizes.lbs_inode, 0,
						    SLAB_PANIC, NULL);

	/* Attach security blobs to the boot task's cred and task struct. */
	if (lsm_cred_alloc((struct cred *)unrcu_pointer(current->cred),
			   GFP_KERNEL))
		panic("early LSM cred alloc failed\n");
	if (lsm_task_alloc(current))
		panic("early LSM task alloc failed\n");

	cnt = 0;
	lsm_order_for_each(lsm) {
		/* skip the "early" LSMs as they have already been setup */
		if (cnt++ < lsm_count_early)
			continue;
		lsm_init_single(*lsm);
	}

	return 0;
}
495
496/**
497 * security_initcall_pure - Run the LSM pure initcalls
498 */
/**
 * security_initcall_pure - Run the LSM pure initcalls
 *
 * Return: 0 on success, the first failing LSM initcall's return value
 *         otherwise.
 */
static int __init security_initcall_pure(void)
{
	return lsm_initcall(pure);
}
pure_initcall(security_initcall_pure);
504
505/**
506 * security_initcall_early - Run the LSM early initcalls
507 */
/**
 * security_initcall_early - Run the LSM early initcalls
 *
 * Return: 0 on success, the first failing LSM initcall's return value
 *         otherwise.
 */
static int __init security_initcall_early(void)
{
	return lsm_initcall(early);
}
early_initcall(security_initcall_early);
513
514/**
515 * security_initcall_core - Run the LSM core initcalls
516 */
517static int __init security_initcall_core(void)
518{
519 int rc_sfs, rc_lsm;
520
521 rc_sfs = securityfs_init();
522 rc_lsm = lsm_initcall(core);
523
524 return (rc_sfs ? rc_sfs : rc_lsm);
525}
526core_initcall(security_initcall_core);
527
528/**
529 * security_initcall_subsys - Run the LSM subsys initcalls
530 */
/**
 * security_initcall_subsys - Run the LSM subsys initcalls
 *
 * Return: 0 on success, the first failing LSM initcall's return value
 *         otherwise.
 */
static int __init security_initcall_subsys(void)
{
	return lsm_initcall(subsys);
}
subsys_initcall(security_initcall_subsys);
536
537/**
538 * security_initcall_fs - Run the LSM fs initcalls
539 */
/**
 * security_initcall_fs - Run the LSM fs initcalls
 *
 * Return: 0 on success, the first failing LSM initcall's return value
 *         otherwise.
 */
static int __init security_initcall_fs(void)
{
	return lsm_initcall(fs);
}
fs_initcall(security_initcall_fs);
545
546/**
547 * security_initcall_device - Run the LSM device initcalls
548 */
/**
 * security_initcall_device - Run the LSM device initcalls
 *
 * Return: 0 on success, the first failing LSM initcall's return value
 *         otherwise.
 */
static int __init security_initcall_device(void)
{
	return lsm_initcall(device);
}
device_initcall(security_initcall_device);
554
555/**
556 * security_initcall_late - Run the LSM late initcalls
557 */
/**
 * security_initcall_late - Run the LSM late initcalls
 *
 * Runs the final initcall level and then announces full LSM activation to
 * any blocking LSM notifier listeners; the notifier fires even if a late
 * initcall failed.
 *
 * Return: 0 on success, the first failing LSM initcall's return value
 *         otherwise.
 */
static int __init security_initcall_late(void)
{
	int rc;

	rc = lsm_initcall(late);
	lsm_pr_dbg("all enabled LSMs fully activated\n");
	call_blocking_lsm_notifier(LSM_STARTED_ALL, NULL);

	return rc;
}
late_initcall(security_initcall_late);