// SPDX-License-Identifier: GPL-2.0-only
/*
 * kexec.c - kexec system call core code.
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/btf.h>
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/liveupdate.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/panic_notifier.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>
#include <linux/hugetlb.h>
#include <linux/objtool.h>
#include <linux/kmsg_dump.h>
#include <linux/dma-map-ops.h>
#include <linux/sysfs.h>

#include <asm/page.h>
#include <asm/sections.h>

#include "kexec_internal.h"

atomic_t __kexec_lock = ATOMIC_INIT(0);

/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;

bool kexec_file_dbg_print;

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where the MMU can be disabled this is trivial and easy; for others
 * it is still a simple, predictable page table to set up.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address fits in an unsigned long.  In particular,
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements,
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses, it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM, can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */

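/*
 * Sketch of the descriptor encoding used throughout this file: each
 * kimage_entry_t packs a page-aligned physical address together with a
 * type flag in its low bits (the IND_* flags from <linux/kexec.h>),
 * for example:
 *
 *        entry = (phys_addr & PAGE_MASK) | IND_SOURCE;
 *
 * IND_DESTINATION sets the copy cursor, IND_SOURCE names the next page
 * to copy there (advancing the cursor by PAGE_SIZE), IND_INDIRECTION
 * chains to the next page of entries, and IND_DONE terminates the list.
 */
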
/*
 * KIMAGE_NO_DEST is an impossible destination address, used when
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)
#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

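/*
 * PAGE_COUNT() rounds a byte count up to whole pages; with 4 KiB pages,
 * for instance, PAGE_COUNT(1) == 1 and PAGE_COUNT(PAGE_SIZE + 1) == 2.
 */
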
static struct page *kimage_alloc_page(struct kimage *image,
                                      gfp_t gfp_mask,
                                      unsigned long dest);

int sanity_check_segment_list(struct kimage *image)
{
        int i;
        unsigned long nr_segments = image->nr_segments;
        unsigned long total_pages = 0;
        unsigned long nr_pages = totalram_pages();

        /*
         * Verify we have good destination addresses.  The caller is
         * responsible for making certain we don't attempt to load
         * the new image into invalid or reserved areas of RAM.  This
         * just verifies it is an address we can use.
         *
         * Since the kernel does everything in page size chunks ensure
         * the destination addresses are page aligned.  Too many
         * special cases crop up when we don't do this.  The most
         * insidious is getting overlapping destination addresses
         * simply because addresses are changed to page size
         * granularity.
         */
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend = mstart + image->segment[i].memsz;
                if (mstart > mend)
                        return -EADDRNOTAVAIL;
                if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
                        return -EADDRNOTAVAIL;
                if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
                        return -EADDRNOTAVAIL;
        }

        /* Verify our destination addresses do not overlap.
         * If we allowed overlapping destination addresses
         * through, very weird things could happen, with no
         * easy explanation, as one segment stops on another.
         */
        for (i = 0; i < nr_segments; i++) {
                unsigned long mstart, mend;
                unsigned long j;

                mstart = image->segment[i].mem;
                mend = mstart + image->segment[i].memsz;
                for (j = 0; j < i; j++) {
                        unsigned long pstart, pend;

                        pstart = image->segment[j].mem;
                        pend = pstart + image->segment[j].memsz;
                        /* Do the segments overlap? */
                        if ((mend > pstart) && (mstart < pend))
                                return -EINVAL;
                }
        }

        /* Ensure our buffer sizes do not exceed our memory sizes.
         * This should always be the case, and it is easier to check
         * up front than to be surprised later on.
         */
        for (i = 0; i < nr_segments; i++) {
                if (image->segment[i].bufsz > image->segment[i].memsz)
                        return -EINVAL;
        }

        /*
         * Verify that no more than half of memory will be consumed.  If the
         * request from userspace is too large, a large amount of time will be
         * wasted allocating pages, which can cause a soft lockup.
         */
        for (i = 0; i < nr_segments; i++) {
                if (PAGE_COUNT(image->segment[i].memsz) > nr_pages / 2)
                        return -EINVAL;

                total_pages += PAGE_COUNT(image->segment[i].memsz);
        }

        if (total_pages > nr_pages / 2)
                return -EINVAL;

#ifdef CONFIG_CRASH_DUMP
        /*
         * Verify we have good destination addresses.  Normally
         * the caller is responsible for making certain we don't
         * attempt to load the new image into invalid or reserved
         * areas of RAM.  But crash kernels are preloaded into a
         * reserved area of RAM.  We must ensure the addresses
         * are in the reserved area, otherwise preloading the
         * kernel could corrupt things.
         */

        if (image->type == KEXEC_TYPE_CRASH) {
                for (i = 0; i < nr_segments; i++) {
                        unsigned long mstart, mend;

                        mstart = image->segment[i].mem;
                        mend = mstart + image->segment[i].memsz - 1;
                        /* Ensure we are within the crash kernel limits */
                        if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
                            (mend > phys_to_boot_phys(crashk_res.end)))
                                return -EADDRNOTAVAIL;
                }
        }
#endif

        /*
         * The destination addresses are searched from system RAM rather than
         * being allocated from the buddy allocator, so they are not guaranteed
         * to be accepted by the current kernel.  Accept the destination
         * addresses before kexec swaps their content with the segments' source
         * pages, to avoid accessing memory before it is accepted.
         */
        for (i = 0; i < nr_segments; i++)
                accept_memory(image->segment[i].mem, image->segment[i].memsz);

        return 0;
}

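/*
 * An illustrative failure case for sanity_check_segment_list()'s overlap
 * check (the values are made up): a segment with mem = 0x100000 and
 * memsz = 0x3000 covers [0x100000, 0x103000), so a second segment with
 * mem = 0x102000 and memsz = 0x1000 intersects it and the load is
 * rejected with -EINVAL.
 */
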
struct kimage *do_kimage_alloc_init(void)
{
        struct kimage *image;

        /* Allocate a controlling structure */
        image = kzalloc_obj(*image);
        if (!image)
                return NULL;

        image->entry = &image->head;
        image->last_entry = &image->head;
        image->control_page = ~0; /* By default this does not apply */
        image->type = KEXEC_TYPE_DEFAULT;

        /* Initialize the list of control pages */
        INIT_LIST_HEAD(&image->control_pages);

        /* Initialize the list of destination pages */
        INIT_LIST_HEAD(&image->dest_pages);

        /* Initialize the list of unusable pages */
        INIT_LIST_HEAD(&image->unusable_pages);

#ifdef CONFIG_CRASH_HOTPLUG
        image->hp_action = KEXEC_CRASH_HP_NONE;
        image->elfcorehdr_index = -1;
        image->elfcorehdr_updated = false;
#endif

        return image;
}

int kimage_is_destination_range(struct kimage *image,
                                unsigned long start,
                                unsigned long end)
{
        unsigned long i;

        for (i = 0; i < image->nr_segments; i++) {
                unsigned long mstart, mend;

                mstart = image->segment[i].mem;
                mend = mstart + image->segment[i].memsz - 1;
                if ((end >= mstart) && (start <= mend))
                        return 1;
        }

        return 0;
}

static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
        struct page *pages;

        if (fatal_signal_pending(current))
                return NULL;
        pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
        if (pages) {
                unsigned int count, i;

                pages->mapping = NULL;
                set_page_private(pages, order);
                count = 1 << order;
                for (i = 0; i < count; i++)
                        SetPageReserved(pages + i);

                arch_kexec_post_alloc_pages(page_address(pages), count,
                                            gfp_mask);

                if (gfp_mask & __GFP_ZERO)
                        for (i = 0; i < count; i++)
                                clear_highpage(pages + i);
        }

        return pages;
}

static void kimage_free_pages(struct page *page)
{
        unsigned int order, count, i;

        order = page_private(page);
        count = 1 << order;

        arch_kexec_pre_free_pages(page_address(page), count);

        for (i = 0; i < count; i++)
                ClearPageReserved(page + i);
        __free_pages(page, order);
}

void kimage_free_page_list(struct list_head *list)
{
        struct page *page, *next;

        list_for_each_entry_safe(page, next, list, lru) {
                list_del(&page->lru);
                kimage_free_pages(page);
        }
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
                                                      unsigned int order)
{
        /* Control pages are special, they are the intermediaries
         * that are needed while we copy the rest of the pages
         * to their final resting place.  As such they must
         * not conflict with either the destination addresses
         * or memory the kernel is already using.
         *
         * The only case where we really need more than one of
         * these is for architectures where we cannot disable
         * the MMU and must instead generate an identity mapped
         * page table for all of the memory.
         *
         * At worst this runs in O(N) of the image size.
         */
        struct list_head extra_pages;
        struct page *pages;
        unsigned int count;

        count = 1 << order;
        INIT_LIST_HEAD(&extra_pages);

        /* Loop while I can allocate a page and the page allocated
         * is a destination page.
         */
        do {
                unsigned long pfn, epfn, addr, eaddr;

                pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
                if (!pages)
                        break;
                pfn = page_to_boot_pfn(pages);
                epfn = pfn + count;
                addr = pfn << PAGE_SHIFT;
                eaddr = (epfn << PAGE_SHIFT) - 1;
                if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
                    kimage_is_destination_range(image, addr, eaddr)) {
                        list_add(&pages->lru, &extra_pages);
                        pages = NULL;
                }
        } while (!pages);

        if (pages) {
                /* Remember the allocated page... */
                list_add(&pages->lru, &image->control_pages);

                /* Because the page is already in its destination
                 * location we will never allocate another page at
                 * that address.  Therefore kimage_alloc_pages
                 * will not return it (again) and we don't need
                 * to give it an entry in image->segment[].
                 */
        }
        /* Deal with the destination pages I have inadvertently allocated.
         *
         * Ideally I would convert multi-page allocations into single
         * page allocations, and add everything to image->dest_pages.
         *
         * For now it is simpler to just free the pages.
         */
        kimage_free_page_list(&extra_pages);

        return pages;
}

#ifdef CONFIG_CRASH_DUMP
static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
                                                     unsigned int order)
{
        /* Control pages are special, they are the intermediaries
         * that are needed while we copy the rest of the pages
         * to their final resting place.  As such they must
         * not conflict with either the destination addresses
         * or memory the kernel is already using.
         *
         * Control pages are also the only pages we must allocate
         * when loading a crash kernel.  All of the other pages
         * are specified by the segments and we just memcpy
         * into them directly.
         *
         * The only case where we really need more than one of
         * these is for architectures where we cannot disable
         * the MMU and must instead generate an identity mapped
         * page table for all of the memory.
         *
         * Given the low demand this implements a very simple
         * allocator that finds the first hole of the appropriate
         * size in the reserved memory region, and allocates all
         * of the memory up to and including the hole.
         */
        unsigned long hole_start, hole_end, size;
        struct page *pages;

        pages = NULL;
        size = (1 << order) << PAGE_SHIFT;
        hole_start = ALIGN(image->control_page, size);
        hole_end = hole_start + size - 1;
        while (hole_end <= crashk_res.end) {
                unsigned long i;

                cond_resched();

                if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
                        break;
                /* See if I overlap any of the segments */
                for (i = 0; i < image->nr_segments; i++) {
                        unsigned long mstart, mend;

                        mstart = image->segment[i].mem;
                        mend = mstart + image->segment[i].memsz - 1;
                        if ((hole_end >= mstart) && (hole_start <= mend)) {
                                /* Advance the hole to the end of the segment */
                                hole_start = ALIGN(mend, size);
                                hole_end = hole_start + size - 1;
                                break;
                        }
                }
                /* If I don't overlap any segments I have found my hole! */
                if (i == image->nr_segments) {
                        pages = pfn_to_page(hole_start >> PAGE_SHIFT);
                        image->control_page = hole_end + 1;
                        break;
                }
        }

        /* Ensure that these pages are decrypted if SME is enabled. */
        if (pages)
                arch_kexec_post_alloc_pages(page_address(pages), 1 << order, 0);

        return pages;
}
#endif

struct page *kimage_alloc_control_pages(struct kimage *image,
                                        unsigned int order)
{
        struct page *pages = NULL;

        switch (image->type) {
        case KEXEC_TYPE_DEFAULT:
                pages = kimage_alloc_normal_control_pages(image, order);
                break;
#ifdef CONFIG_CRASH_DUMP
        case KEXEC_TYPE_CRASH:
                pages = kimage_alloc_crash_control_pages(image, order);
                break;
#endif
        }

        return pages;
}

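/*
 * How the entry list below grows, in sketch form: kimage_add_entry()
 * appends at image->entry until it reaches image->last_entry, at which
 * point it allocates a fresh page, writes an IND_INDIRECTION link to
 * that page into the last slot of the old one, and continues appending
 * there.  image->last_entry points at the final slot of each
 * indirection page, which is kept free for that next link (or for the
 * terminating IND_DONE).
 */
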
static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
        if (*image->entry != 0)
                image->entry++;

        if (image->entry == image->last_entry) {
                kimage_entry_t *ind_page;
                struct page *page;

                page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
                if (!page)
                        return -ENOMEM;

                ind_page = page_address(page);
                *image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
                image->entry = ind_page;
                image->last_entry = ind_page +
                                ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
        }
        *image->entry = entry;
        image->entry++;
        *image->entry = 0;

        return 0;
}

static int kimage_set_destination(struct kimage *image,
                                  unsigned long destination)
{
        destination &= PAGE_MASK;

        return kimage_add_entry(image, destination | IND_DESTINATION);
}

static int kimage_add_page(struct kimage *image, unsigned long page)
{
        page &= PAGE_MASK;

        return kimage_add_entry(image, page | IND_SOURCE);
}

static void kimage_free_extra_pages(struct kimage *image)
{
        /* Walk through and free any extra destination pages I may have */
        kimage_free_page_list(&image->dest_pages);

        /* Walk through and free any unusable pages I have cached */
        kimage_free_page_list(&image->unusable_pages);
}

void kimage_terminate(struct kimage *image)
{
        if (*image->entry != 0)
                image->entry++;

        *image->entry = IND_DONE;
}

#define for_each_kimage_entry(image, ptr, entry) \
        for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
                ptr = (entry & IND_INDIRECTION) ? \
                        boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)

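/*
 * Illustrative use of the iterator above (a sketch, not code used
 * elsewhere in this file):
 *
 *        kimage_entry_t *ptr, entry;
 *        unsigned long nr_source_pages = 0;
 *
 *        for_each_kimage_entry(image, ptr, entry)
 *                if (entry & IND_SOURCE)
 *                        nr_source_pages++;
 *
 * The walk transparently follows IND_INDIRECTION links and stops at
 * IND_DONE (or at a zero sentinel entry).
 */
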
static void kimage_free_entry(kimage_entry_t entry)
{
        struct page *page;

        page = boot_pfn_to_page(entry >> PAGE_SHIFT);
        kimage_free_pages(page);
}

static void kimage_free_cma(struct kimage *image)
{
        unsigned long i;

        for (i = 0; i < image->nr_segments; i++) {
                struct page *cma = image->segment_cma[i];
                u32 nr_pages = image->segment[i].memsz >> PAGE_SHIFT;

                if (!cma)
                        continue;

                arch_kexec_pre_free_pages(page_address(cma), nr_pages);
                dma_release_from_contiguous(NULL, cma, nr_pages);
                image->segment_cma[i] = NULL;
        }
}

void kimage_free(struct kimage *image)
{
        kimage_entry_t *ptr, entry;
        kimage_entry_t ind = 0;

        if (!image)
                return;

#ifdef CONFIG_CRASH_DUMP
        if (image->vmcoreinfo_data_copy) {
                crash_update_vmcoreinfo_safecopy(NULL);
                vunmap(image->vmcoreinfo_data_copy);
        }
#endif

        kimage_free_extra_pages(image);
        for_each_kimage_entry(image, ptr, entry) {
                if (entry & IND_INDIRECTION) {
                        /* Free the previous indirection page */
                        if (ind & IND_INDIRECTION)
                                kimage_free_entry(ind);
                        /* Save this indirection page until we are
                         * done with it.
                         */
                        ind = entry;
                } else if (entry & IND_SOURCE)
                        kimage_free_entry(entry);
        }
        /* Free the final indirection page */
        if (ind & IND_INDIRECTION)
                kimage_free_entry(ind);

        /* Handle any machine specific cleanup */
        machine_kexec_cleanup(image);

        /* Free the kexec control pages... */
        kimage_free_page_list(&image->control_pages);

        /* Free CMA allocations */
        kimage_free_cma(image);

        /*
         * Free up any temporary buffers allocated.  This path can be
         * hit if an error occurred long after buffer allocation.
         */
        if (image->file_mode)
                kimage_file_post_load_cleanup(image);

        kfree(image);
}

static kimage_entry_t *kimage_dst_used(struct kimage *image,
                                       unsigned long page)
{
        kimage_entry_t *ptr, entry;
        unsigned long destination = 0;

        for_each_kimage_entry(image, ptr, entry) {
                if (entry & IND_DESTINATION)
                        destination = entry & PAGE_MASK;
                else if (entry & IND_SOURCE) {
                        if (page == destination)
                                return ptr;
                        destination += PAGE_SIZE;
                }
        }

        return NULL;
}

static struct page *kimage_alloc_page(struct kimage *image,
                                      gfp_t gfp_mask,
                                      unsigned long destination)
{
        /*
         * Here we implement safeguards to ensure that a source page
         * is not copied to its destination page before the data on
         * the destination page is no longer useful.
         *
         * To do this we maintain the invariant that a source page is
         * either its own destination page, or it is not a
         * destination page at all.
         *
         * That is slightly stronger than required, but the proof
         * that no problems can occur is trivial, and the
         * implementation is simple to verify.
         *
         * When allocating all pages normally this algorithm will run
         * in O(N) time, but in the worst case it will run in O(N^2)
         * time.  If the runtime is a problem the data structures can
         * be fixed.
         */
        struct page *page;
        unsigned long addr;

        /*
         * Walk through the list of destination pages, and see if I
         * have a match.
         */
        list_for_each_entry(page, &image->dest_pages, lru) {
                addr = page_to_boot_pfn(page) << PAGE_SHIFT;
                if (addr == destination) {
                        list_del(&page->lru);
                        return page;
                }
        }
        page = NULL;
        while (1) {
                kimage_entry_t *old;

                /* Allocate a page, if we run out of memory give up */
                page = kimage_alloc_pages(gfp_mask, 0);
                if (!page)
                        return NULL;
                /* If the page cannot be used, file it away */
                if (page_to_boot_pfn(page) >
                    (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
                        list_add(&page->lru, &image->unusable_pages);
                        continue;
                }
                addr = page_to_boot_pfn(page) << PAGE_SHIFT;

                /* If it is the destination page we want, use it */
                if (addr == destination)
                        break;

                /* If the page is not a destination page, use it */
                if (!kimage_is_destination_range(image, addr,
                                                 addr + PAGE_SIZE - 1))
                        break;

                /*
                 * I know that the page is someone's destination page.
                 * See if there is already a source page for this
                 * destination page.  And if so, swap the source pages.
                 */
                old = kimage_dst_used(image, addr);
                if (old) {
                        /* If so move it */
                        unsigned long old_addr;
                        struct page *old_page;

                        old_addr = *old & PAGE_MASK;
                        old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
                        copy_highpage(page, old_page);
                        *old = addr | (*old & ~PAGE_MASK);

                        /* The old page I have found cannot be a
                         * destination page, so use it, provided its
                         * gfp_flags honor the ones passed in.
                         */
                        if (!(gfp_mask & __GFP_HIGHMEM) &&
                            PageHighMem(old_page)) {
                                kimage_free_pages(old_page);
                                continue;
                        }
                        page = old_page;
                        break;
                }
                /* Place the page on the destination list, to be used later */
                list_add(&page->lru, &image->dest_pages);
        }

        return page;
}

static int kimage_load_cma_segment(struct kimage *image, int idx)
{
        struct kexec_segment *segment = &image->segment[idx];
        struct page *cma = image->segment_cma[idx];
        char *ptr = page_address(cma);
        size_t ubytes, mbytes;
        int result = 0;
        unsigned char __user *buf = NULL;
        unsigned char *kbuf = NULL;

        if (image->file_mode)
                kbuf = segment->kbuf;
        else
                buf = segment->buf;
        ubytes = segment->bufsz;
        mbytes = segment->memsz;

        /* Then copy from the source buffer to the CMA one */
        while (mbytes) {
                size_t uchunk, mchunk;

                mchunk = min_t(size_t, mbytes, PAGE_SIZE);
                uchunk = min(ubytes, mchunk);

                if (uchunk) {
                        /* For file based kexec, source pages are in kernel memory */
                        if (image->file_mode)
                                memcpy(ptr, kbuf, uchunk);
                        else
                                result = copy_from_user(ptr, buf, uchunk);
                        ubytes -= uchunk;
                        if (image->file_mode)
                                kbuf += uchunk;
                        else
                                buf += uchunk;
                }

                if (result) {
                        result = -EFAULT;
                        goto out;
                }

                ptr += mchunk;
                mbytes -= mchunk;

                cond_resched();
        }

        /* Clear any remainder */
        memset(ptr, 0, mbytes);

out:
        return result;
}

static int kimage_load_normal_segment(struct kimage *image, int idx)
{
        struct kexec_segment *segment = &image->segment[idx];
        unsigned long maddr;
        size_t ubytes, mbytes;
        int result;
        unsigned char __user *buf = NULL;
        unsigned char *kbuf = NULL;

        if (image->file_mode)
                kbuf = segment->kbuf;
        else
                buf = segment->buf;
        ubytes = segment->bufsz;
        mbytes = segment->memsz;
        maddr = segment->mem;

        if (image->segment_cma[idx])
                return kimage_load_cma_segment(image, idx);

        result = kimage_set_destination(image, maddr);
        if (result < 0)
                goto out;

        while (mbytes) {
                struct page *page;
                char *ptr;
                size_t uchunk, mchunk;

                page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
                if (!page) {
                        result = -ENOMEM;
                        goto out;
                }
                result = kimage_add_page(image, page_to_boot_pfn(page)
                                                << PAGE_SHIFT);
                if (result < 0)
                        goto out;

                ptr = kmap_local_page(page);
                /* Start with a clear page */
                clear_page(ptr);
                mchunk = min_t(size_t, mbytes, PAGE_SIZE);
                uchunk = min(ubytes, mchunk);

                if (uchunk) {
                        /* For file based kexec, source pages are in kernel memory */
                        if (image->file_mode)
                                memcpy(ptr, kbuf, uchunk);
                        else
                                result = copy_from_user(ptr, buf, uchunk);
                        ubytes -= uchunk;
                        if (image->file_mode)
                                kbuf += uchunk;
                        else
                                buf += uchunk;
                }
                kunmap_local(ptr);
                if (result) {
                        result = -EFAULT;
                        goto out;
                }
                maddr += mchunk;
                mbytes -= mchunk;

                cond_resched();
        }
out:
        return result;
}

#ifdef CONFIG_CRASH_DUMP
static int kimage_load_crash_segment(struct kimage *image, int idx)
{
        /* For crash dump kernels we simply copy the data from
         * user space to its destination.
         * We do things a page at a time for the sake of kmap.
         */
        struct kexec_segment *segment = &image->segment[idx];
        unsigned long maddr;
        size_t ubytes, mbytes;
        int result;
        unsigned char __user *buf = NULL;
        unsigned char *kbuf = NULL;

        result = 0;
        if (image->file_mode)
                kbuf = segment->kbuf;
        else
                buf = segment->buf;
        ubytes = segment->bufsz;
        mbytes = segment->memsz;
        maddr = segment->mem;
        while (mbytes) {
                struct page *page;
                char *ptr;
                size_t uchunk, mchunk;

                page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
                if (!page) {
                        result = -ENOMEM;
                        goto out;
                }
                arch_kexec_post_alloc_pages(page_address(page), 1, 0);
                ptr = kmap_local_page(page);
                mchunk = min_t(size_t, mbytes, PAGE_SIZE);
                uchunk = min(ubytes, mchunk);
                if (mchunk > uchunk) {
                        /* Zero the trailing part of the page */
                        memset(ptr + uchunk, 0, mchunk - uchunk);
                }

                if (uchunk) {
                        /* For file based kexec, source pages are in kernel memory */
                        if (image->file_mode)
                                memcpy(ptr, kbuf, uchunk);
                        else
                                result = copy_from_user(ptr, buf, uchunk);
                        ubytes -= uchunk;
                        if (image->file_mode)
                                kbuf += uchunk;
                        else
                                buf += uchunk;
                }
                kexec_flush_icache_page(page);
                kunmap_local(ptr);
                arch_kexec_pre_free_pages(page_address(page), 1);
                if (result) {
                        result = -EFAULT;
                        goto out;
                }
                maddr += mchunk;
                mbytes -= mchunk;

                cond_resched();
        }
out:
        return result;
}
#endif

int kimage_load_segment(struct kimage *image, int idx)
{
        int result = -ENOMEM;

        switch (image->type) {
        case KEXEC_TYPE_DEFAULT:
                result = kimage_load_normal_segment(image, idx);
                break;
#ifdef CONFIG_CRASH_DUMP
        case KEXEC_TYPE_CRASH:
                result = kimage_load_crash_segment(image, idx);
                break;
#endif
        }

        return result;
}

void *kimage_map_segment(struct kimage *image, int idx)
{
        unsigned long addr, size, eaddr;
        unsigned long src_page_addr, dest_page_addr = 0;
        kimage_entry_t *ptr, entry;
        struct page **src_pages;
        unsigned int npages;
        struct page *cma;
        void *vaddr = NULL;
        int i;

        cma = image->segment_cma[idx];
        if (cma)
                return page_address(cma);

        addr = image->segment[idx].mem;
        size = image->segment[idx].memsz;
        eaddr = addr + size;
        /*
         * Collect the source pages and map them in a contiguous VA range.
         */
        npages = PFN_UP(eaddr) - PFN_DOWN(addr);
        src_pages = kmalloc_objs(*src_pages, npages);
        if (!src_pages) {
                pr_err("Could not allocate ima pages array.\n");
                return NULL;
        }

        i = 0;
        for_each_kimage_entry(image, ptr, entry) {
                if (entry & IND_DESTINATION) {
                        dest_page_addr = entry & PAGE_MASK;
                } else if (entry & IND_SOURCE) {
                        if (dest_page_addr >= addr && dest_page_addr < eaddr) {
                                src_page_addr = entry & PAGE_MASK;
                                src_pages[i++] =
                                        virt_to_page(__va(src_page_addr));
                                if (i == npages)
                                        break;
                                dest_page_addr += PAGE_SIZE;
                        }
                }
        }

        /* Sanity check. */
        WARN_ON(i < npages);

        vaddr = vmap(src_pages, npages, VM_MAP, PAGE_KERNEL);
        kfree(src_pages);

        if (!vaddr)
                pr_err("Could not map ima buffer.\n");

        return vaddr;
}

void kimage_unmap_segment(void *segment_buffer)
{
        if (is_vmalloc_addr(segment_buffer))
                vunmap(segment_buffer);
}

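/*
 * Typical pairing of the two helpers above, in sketch form (callers
 * such as the IMA kexec code follow this pattern):
 *
 *        void *buf = kimage_map_segment(image, idx);
 *
 *        if (buf) {
 *                ... read or update the segment contents ...
 *                kimage_unmap_segment(buf);
 *        }
 *
 * For CMA-backed segments kimage_map_segment() returns the linear
 * mapping directly, which is why kimage_unmap_segment() only vunmaps
 * genuine vmalloc addresses.
 */
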
struct kexec_load_limit {
        /* Mutex protects the limit count. */
        struct mutex mutex;
        int limit;
};

static struct kexec_load_limit load_limit_reboot = {
        .mutex = __MUTEX_INITIALIZER(load_limit_reboot.mutex),
        .limit = -1,
};

static struct kexec_load_limit load_limit_panic = {
        .mutex = __MUTEX_INITIALIZER(load_limit_panic.mutex),
        .limit = -1,
};

struct kimage *kexec_image;
struct kimage *kexec_crash_image;
static int kexec_load_disabled;

#ifdef CONFIG_SYSCTL
static int kexec_limit_handler(const struct ctl_table *table, int write,
                               void *buffer, size_t *lenp, loff_t *ppos)
{
        struct kexec_load_limit *limit = table->data;
        int val;
        struct ctl_table tmp = {
                .data = &val,
                .maxlen = sizeof(val),
                .mode = table->mode,
        };
        int ret;

        if (write) {
                ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
                if (ret)
                        return ret;

                if (val < 0)
                        return -EINVAL;

                mutex_lock(&limit->mutex);
                if (limit->limit != -1 && val >= limit->limit)
                        ret = -EINVAL;
                else
                        limit->limit = val;
                mutex_unlock(&limit->mutex);

                return ret;
        }

        mutex_lock(&limit->mutex);
        val = limit->limit;
        mutex_unlock(&limit->mutex);

        return proc_dointvec(&tmp, write, buffer, lenp, ppos);
}

static const struct ctl_table kexec_core_sysctls[] = {
        {
                .procname = "kexec_load_disabled",
                .data = &kexec_load_disabled,
                .maxlen = sizeof(int),
                .mode = 0644,
                /* only handle a transition from default "0" to "1" */
                .proc_handler = proc_dointvec_minmax,
                .extra1 = SYSCTL_ONE,
                .extra2 = SYSCTL_ONE,
        },
        {
                .procname = "kexec_load_limit_panic",
                .data = &load_limit_panic,
                .mode = 0644,
                .proc_handler = kexec_limit_handler,
        },
        {
                .procname = "kexec_load_limit_reboot",
                .data = &load_limit_reboot,
                .mode = 0644,
                .proc_handler = kexec_limit_handler,
        },
};

static int __init kexec_core_sysctl_init(void)
{
        register_sysctl_init("kernel", kexec_core_sysctls);
        return 0;
}
late_initcall(kexec_core_sysctl_init);
#endif

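/*
 * Example use of the sysctls above from userspace (illustrative shell
 * commands, run as root):
 *
 *        # sysctl kernel.kexec_load_limit_reboot
 *        kernel.kexec_load_limit_reboot = -1
 *        # sysctl -w kernel.kexec_load_limit_panic=1
 *
 * A limit starts at -1 (unlimited); once set to a non-negative value
 * it can only be lowered, and kexec_load_disabled only accepts the
 * transition from 0 to 1.
 */
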
bool kexec_load_permitted(int kexec_image_type)
{
        struct kexec_load_limit *limit;

        /*
         * Only the superuser may use the kexec syscall, and only if
         * it has not been disabled.
         */
        if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
                return false;

        /* Check the limit counter and decrease it. */
        limit = (kexec_image_type == KEXEC_TYPE_CRASH) ?
                &load_limit_panic : &load_limit_reboot;
        mutex_lock(&limit->mutex);
        if (!limit->limit) {
                mutex_unlock(&limit->mutex);
                return false;
        }
        if (limit->limit != -1)
                limit->limit--;
        mutex_unlock(&limit->mutex);

        return true;
}

/*
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded, return an error.
 */
int kernel_kexec(void)
{
        int error = 0;

        if (!kexec_trylock())
                return -EBUSY;
        if (!kexec_image) {
                error = -EINVAL;
                goto Unlock;
        }

        error = liveupdate_reboot();
        if (error)
                goto Unlock;

#ifdef CONFIG_KEXEC_JUMP
        if (kexec_image->preserve_context) {
                /*
                 * This flow is analogous to hibernation flows that occur
                 * before creating an image and before jumping from the
                 * restore kernel to the image one, so it uses the same
                 * device callbacks as those two flows.
                 */
                pm_prepare_console();
                error = freeze_processes();
                if (error) {
                        error = -EBUSY;
                        goto Restore_console;
                }
                console_suspend_all();
                error = dpm_suspend_start(PMSG_FREEZE);
                if (error)
                        goto Resume_devices;
                /*
                 * dpm_suspend_end() must be called after dpm_suspend_start()
                 * to complete the transition, like in the hibernation flows
                 * mentioned above.
                 */
                error = dpm_suspend_end(PMSG_FREEZE);
                if (error)
                        goto Resume_devices;
                error = suspend_disable_secondary_cpus();
                if (error)
                        goto Enable_cpus;
                local_irq_disable();
                error = syscore_suspend();
                if (error)
                        goto Enable_irqs;
        } else
#endif
        {
                kexec_in_progress = true;
                kernel_restart_prepare("kexec reboot");
                migrate_to_reboot_cpu();
                syscore_shutdown();

                /*
                 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
                 * no further code needs to use CPU hotplug (which is true in
                 * the reboot case).  However, the kexec path depends on using
                 * CPU hotplug again; so re-enable it here.
                 */
                cpu_hotplug_enable();
                pr_notice("Starting new kernel\n");
                machine_shutdown();
        }

        kmsg_dump(KMSG_DUMP_SHUTDOWN);
        machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
        if (kexec_image->preserve_context) {
                /*
                 * This flow is analogous to hibernation flows that occur after
                 * creating an image and after the image kernel has got control
                 * back, and in case the devices have been reset or otherwise
                 * manipulated in the meantime, it uses the device callbacks
                 * used by the latter.
                 */
                syscore_resume();
 Enable_irqs:
                local_irq_enable();
 Enable_cpus:
                suspend_enable_secondary_cpus();
                dpm_resume_start(PMSG_RESTORE);
 Resume_devices:
                dpm_resume_end(PMSG_RESTORE);
                console_resume_all();
                thaw_processes();
 Restore_console:
                pm_restore_console();
        }
#endif

 Unlock:
        kexec_unlock();
        return error;
}

static ssize_t loaded_show(struct kobject *kobj,
                           struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n", !!kexec_image);
}
static struct kobj_attribute loaded_attr = __ATTR_RO(loaded);

#ifdef CONFIG_CRASH_DUMP
static ssize_t crash_loaded_show(struct kobject *kobj,
                                 struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n", kexec_crash_loaded());
}
static struct kobj_attribute crash_loaded_attr = __ATTR_RO(crash_loaded);

#ifdef CONFIG_CRASH_RESERVE
static ssize_t crash_cma_ranges_show(struct kobject *kobj,
                                     struct kobj_attribute *attr, char *buf)
{
        ssize_t len = 0;
        int i;

        for (i = 0; i < crashk_cma_cnt; ++i) {
                len += sysfs_emit_at(buf, len, "%08llx-%08llx\n",
                                     crashk_cma_ranges[i].start,
                                     crashk_cma_ranges[i].end);
        }
        return len;
}
static struct kobj_attribute crash_cma_ranges_attr = __ATTR_RO(crash_cma_ranges);
#endif

static ssize_t crash_size_show(struct kobject *kobj,
                               struct kobj_attribute *attr, char *buf)
{
        ssize_t size = crash_get_memory_size();

        if (size < 0)
                return size;

        return sysfs_emit(buf, "%zd\n", size);
}
static ssize_t crash_size_store(struct kobject *kobj,
                                struct kobj_attribute *attr,
                                const char *buf, size_t count)
{
        unsigned long cnt;
        int ret;

        if (kstrtoul(buf, 0, &cnt))
                return -EINVAL;

        ret = crash_shrink_memory(cnt);
        return ret < 0 ? ret : count;
}
static struct kobj_attribute crash_size_attr = __ATTR_RW(crash_size);

#ifdef CONFIG_CRASH_HOTPLUG
static ssize_t crash_elfcorehdr_size_show(struct kobject *kobj,
                                          struct kobj_attribute *attr, char *buf)
{
        unsigned int sz = crash_get_elfcorehdr_size();

        return sysfs_emit(buf, "%u\n", sz);
}
static struct kobj_attribute crash_elfcorehdr_size_attr = __ATTR_RO(crash_elfcorehdr_size);
#endif /* CONFIG_CRASH_HOTPLUG */
#endif /* CONFIG_CRASH_DUMP */

static struct attribute *kexec_attrs[] = {
        &loaded_attr.attr,
#ifdef CONFIG_CRASH_DUMP
        &crash_loaded_attr.attr,
        &crash_size_attr.attr,
#ifdef CONFIG_CRASH_RESERVE
        &crash_cma_ranges_attr.attr,
#endif
#ifdef CONFIG_CRASH_HOTPLUG
        &crash_elfcorehdr_size_attr.attr,
#endif
#endif
        NULL
};

struct kexec_link_entry {
        const char *target;
        const char *name;
};

static struct kexec_link_entry kexec_links[] = {
        { "loaded", "kexec_loaded" },
#ifdef CONFIG_CRASH_DUMP
        { "crash_loaded", "kexec_crash_loaded" },
        { "crash_size", "kexec_crash_size" },
#ifdef CONFIG_CRASH_RESERVE
        { "crash_cma_ranges", "kexec_crash_cma_ranges" },
#endif
#ifdef CONFIG_CRASH_HOTPLUG
        { "crash_elfcorehdr_size", "crash_elfcorehdr_size" },
#endif
#endif
};

static struct kobject *kexec_kobj;
ATTRIBUTE_GROUPS(kexec);

static int __init init_kexec_sysctl(void)
{
        int error;
        int i;

        kexec_kobj = kobject_create_and_add("kexec", kernel_kobj);
        if (!kexec_kobj) {
                pr_err("failed to create kexec kobject\n");
                return -ENOMEM;
        }

        error = sysfs_create_groups(kexec_kobj, kexec_groups);
        if (error)
                goto kset_exit;

        for (i = 0; i < ARRAY_SIZE(kexec_links); i++) {
                error = compat_only_sysfs_link_entry_to_kobj(kernel_kobj, kexec_kobj,
                                                             kexec_links[i].target,
                                                             kexec_links[i].name);
                if (error)
                        pr_err("Unable to create %s symlink (%d)\n",
                               kexec_links[i].name, error);
        }

        return 0;

kset_exit:
        kobject_put(kexec_kobj);
        return error;
}

subsys_initcall(init_kexec_sysctl);
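
/*
 * For reference, the sysfs layout produced above (the exact set depends
 * on the CONFIG_* options):
 *
 *        /sys/kernel/kexec/loaded
 *        /sys/kernel/kexec/crash_loaded
 *        /sys/kernel/kexec/crash_size
 *        /sys/kernel/kexec/crash_cma_ranges
 *        /sys/kernel/kexec/crash_elfcorehdr_size
 *
 * plus the compatibility symlinks from kexec_links[], e.g.
 * /sys/kernel/kexec_loaded -> kexec/loaded.
 */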