Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

kho: allow memory preservation state updates after finalization

Currently, kho_preserve_* and kho_unpreserve_* return -EBUSY if KHO is
finalized. This enforces a rigid "freeze" on the KHO memory state.

With the introduction of re-entrant finalization, this restriction is no
longer necessary. Users should be allowed to modify the preservation set
(e.g., adding new pages or freeing old ones) even after an initial
finalization.

The intended workflow for updates is now:
1. Modify state (preserve/unpreserve).
2. Call kho_finalize() again to refresh the serialized metadata.

Remove the kho_out.finalized checks to enable this dynamic behavior.

This also allows converting the kho_unpreserve_* functions to void, as they
no longer return any errors.

Link: https://lkml.kernel.org/r/20251114190002.3311679-13-pasha.tatashin@soleen.com
Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Reviewed-by: Pratyush Yadav <pratyush@kernel.org>
Cc: Alexander Graf <graf@amazon.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Baoquan He <bhe@redhat.com>
Cc: Coiby Xu <coxu@redhat.com>
Cc: Dave Vasilevsky <dave@vasilevsky.ca>
Cc: Eric Biggers <ebiggers@google.com>
Cc: Kees Cook <kees@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

Pasha Tatashin and committed by
Andrew Morton
de51999e d7255959

+19 -57
+6 -15
include/linux/kexec_handover.h
··· 44 44 bool is_kho_boot(void); 45 45 46 46 int kho_preserve_folio(struct folio *folio); 47 - int kho_unpreserve_folio(struct folio *folio); 47 + void kho_unpreserve_folio(struct folio *folio); 48 48 int kho_preserve_pages(struct page *page, unsigned int nr_pages); 49 - int kho_unpreserve_pages(struct page *page, unsigned int nr_pages); 49 + void kho_unpreserve_pages(struct page *page, unsigned int nr_pages); 50 50 int kho_preserve_vmalloc(void *ptr, struct kho_vmalloc *preservation); 51 - int kho_unpreserve_vmalloc(struct kho_vmalloc *preservation); 51 + void kho_unpreserve_vmalloc(struct kho_vmalloc *preservation); 52 52 void *kho_alloc_preserve(size_t size); 53 53 void kho_unpreserve_free(void *mem); 54 54 void kho_restore_free(void *mem); ··· 79 79 return -EOPNOTSUPP; 80 80 } 81 81 82 - static inline int kho_unpreserve_folio(struct folio *folio) 83 - { 84 - return -EOPNOTSUPP; 85 - } 82 + static inline void kho_unpreserve_folio(struct folio *folio) { } 86 83 87 84 static inline int kho_preserve_pages(struct page *page, unsigned int nr_pages) 88 85 { 89 86 return -EOPNOTSUPP; 90 87 } 91 88 92 - static inline int kho_unpreserve_pages(struct page *page, unsigned int nr_pages) 93 - { 94 - return -EOPNOTSUPP; 95 - } 89 + static inline void kho_unpreserve_pages(struct page *page, unsigned int nr_pages) { } 96 90 97 91 static inline int kho_preserve_vmalloc(void *ptr, 98 92 struct kho_vmalloc *preservation) ··· 94 100 return -EOPNOTSUPP; 95 101 } 96 102 97 - static inline int kho_unpreserve_vmalloc(struct kho_vmalloc *preservation) 98 - { 99 - return -EOPNOTSUPP; 100 - } 103 + static inline void kho_unpreserve_vmalloc(struct kho_vmalloc *preservation) { } 101 104 102 105 static inline void *kho_alloc_preserve(size_t size) 103 106 {
+13 -42
kernel/liveupdate/kexec_handover.c
··· 185 185 const unsigned long pfn_high = pfn >> order; 186 186 187 187 might_sleep(); 188 - 189 - if (kho_out.finalized) 190 - return -EBUSY; 191 - 192 188 physxa = xa_load(&track->orders, order); 193 189 if (!physxa) { 194 190 int err; ··· 803 807 * Instructs KHO to unpreserve a folio that was preserved by 804 808 * kho_preserve_folio() before. The provided @folio (pfn and order) 805 809 * must exactly match a previously preserved folio. 806 - * 807 - * Return: 0 on success, error code on failure 808 810 */ 809 - int kho_unpreserve_folio(struct folio *folio) 811 + void kho_unpreserve_folio(struct folio *folio) 810 812 { 811 813 const unsigned long pfn = folio_pfn(folio); 812 814 const unsigned int order = folio_order(folio); 813 815 struct kho_mem_track *track = &kho_out.track; 814 816 815 - if (kho_out.finalized) 816 - return -EBUSY; 817 - 818 817 __kho_unpreserve_order(track, pfn, order); 819 - return 0; 820 818 } 821 819 EXPORT_SYMBOL_GPL(kho_unpreserve_folio); 822 820 ··· 867 877 * This must be called with the same @page and @nr_pages as the corresponding 868 878 * kho_preserve_pages() call. Unpreserving arbitrary sub-ranges of larger 869 879 * preserved blocks is not supported. 
870 - * 871 - * Return: 0 on success, error code on failure 872 880 */ 873 - int kho_unpreserve_pages(struct page *page, unsigned int nr_pages) 881 + void kho_unpreserve_pages(struct page *page, unsigned int nr_pages) 874 882 { 875 883 struct kho_mem_track *track = &kho_out.track; 876 884 const unsigned long start_pfn = page_to_pfn(page); 877 885 const unsigned long end_pfn = start_pfn + nr_pages; 878 886 879 - if (kho_out.finalized) 880 - return -EBUSY; 881 - 882 887 __kho_unpreserve(track, start_pfn, end_pfn); 883 - 884 - return 0; 885 888 } 886 889 EXPORT_SYMBOL_GPL(kho_unpreserve_pages); 887 890 ··· 959 976 } 960 977 } 961 978 962 - static void kho_vmalloc_free_chunks(struct kho_vmalloc *kho_vmalloc) 963 - { 964 - struct kho_vmalloc_chunk *chunk = KHOSER_LOAD_PTR(kho_vmalloc->first); 965 - 966 - while (chunk) { 967 - struct kho_vmalloc_chunk *tmp = chunk; 968 - 969 - kho_vmalloc_unpreserve_chunk(chunk, kho_vmalloc->order); 970 - 971 - chunk = KHOSER_LOAD_PTR(chunk->hdr.next); 972 - free_page((unsigned long)tmp); 973 - } 974 - } 975 - 976 979 /** 977 980 * kho_preserve_vmalloc - preserve memory allocated with vmalloc() across kexec 978 981 * @ptr: pointer to the area in vmalloc address space ··· 1020 1051 return 0; 1021 1052 1022 1053 err_free: 1023 - kho_vmalloc_free_chunks(preservation); 1054 + kho_unpreserve_vmalloc(preservation); 1024 1055 return err; 1025 1056 } 1026 1057 EXPORT_SYMBOL_GPL(kho_preserve_vmalloc); ··· 1031 1062 * 1032 1063 * Instructs KHO to unpreserve the area in vmalloc address space that was 1033 1064 * previously preserved with kho_preserve_vmalloc(). 
1034 - * 1035 - * Return: 0 on success, error code on failure 1036 1065 */ 1037 - int kho_unpreserve_vmalloc(struct kho_vmalloc *preservation) 1066 + void kho_unpreserve_vmalloc(struct kho_vmalloc *preservation) 1038 1067 { 1039 - if (kho_out.finalized) 1040 - return -EBUSY; 1068 + struct kho_vmalloc_chunk *chunk = KHOSER_LOAD_PTR(preservation->first); 1041 1069 1042 - kho_vmalloc_free_chunks(preservation); 1070 + while (chunk) { 1071 + struct kho_vmalloc_chunk *tmp = chunk; 1043 1072 1044 - return 0; 1073 + kho_vmalloc_unpreserve_chunk(chunk, preservation->order); 1074 + 1075 + chunk = KHOSER_LOAD_PTR(chunk->hdr.next); 1076 + free_page((unsigned long)tmp); 1077 + } 1045 1078 } 1046 1079 EXPORT_SYMBOL_GPL(kho_unpreserve_vmalloc); 1047 1080 ··· 1192 1221 return; 1193 1222 1194 1223 folio = virt_to_folio(mem); 1195 - WARN_ON_ONCE(kho_unpreserve_folio(folio)); 1224 + kho_unpreserve_folio(folio); 1196 1225 folio_put(folio); 1197 1226 } 1198 1227 EXPORT_SYMBOL_GPL(kho_unpreserve_free);