Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

scripts/gdb: implement x86_page_ops in mm.py

Implement all member functions of x86_page_ops strictly following the
logic of aarch64_page_ops.

This includes full support for SPARSEMEM and standard page translation
functions.

This fixes compatibility with 'lx-' commands on x86_64, preventing
AttributeErrors when using lx-pfn_to_page and others.

Link: https://lkml.kernel.org/r/20260202034241.649268-1-hsj0512@snu.ac.kr
Signed-off-by: Seongjun Hong <hsj0512@snu.ac.kr>
Cc: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

Seongjun Hong and committed by
Andrew Morton
55f8b451 5ed4b6b3

+173 -2
+1 -1
scripts/gdb/linux/constants.py.in
@@ -150,8 +150,8 @@
     if IS_BUILTIN(CONFIG_ARM64):
         LX_VALUE(CONFIG_ARM64_PA_BITS)
         LX_VALUE(CONFIG_ARM64_VA_BITS)
-        LX_VALUE(CONFIG_PAGE_SHIFT)
         LX_VALUE(CONFIG_ARCH_FORCE_MAX_ORDER)
+    LX_VALUE(CONFIG_PAGE_SHIFT)
     LX_CONFIG(CONFIG_SPARSEMEM)
     LX_CONFIG(CONFIG_SPARSEMEM_EXTREME)
     LX_CONFIG(CONFIG_SPARSEMEM_VMEMMAP)

(Indentation reconstructed from a garbled extraction; CONFIG_PAGE_SHIFT is moved out of the ARM64-only block so it is emitted unconditionally — the new x86 page ops read LX_CONFIG_PAGE_SHIFT.)
+172 -1
scripts/gdb/linux/mm.py
# NOTE(review): this hunk also changes the page-ops dispatcher inside the
# enclosing class __init__ (whose header is outside this view):
#
#     elif utils.is_target_arch('x86_64') or utils.is_target_arch('x86-64'):
#         self.ops = x86_page_ops()
#     else:
#         raise gdb.GdbError('Only support aarch64 and x86_64 now')

class x86_page_ops():
    """Memory-model helpers for x86_64 inferiors, mirroring aarch64_page_ops.

    Implements pfn/page/phys/virt translations used by the lx-* gdb
    commands (lx-pfn_to_page and friends) under CONFIG_SPARSEMEM_VMEMMAP.
    Base addresses are read from the debugged kernel's globals
    (page_offset_base, vmemmap_base, phys_base, vmalloc_base), so
    KASLR-shifted layouts are handled automatically.
    """

    def __init__(self):
        self.struct_page_size = utils.get_page_type().sizeof
        self.PAGE_SHIFT = constants.LX_CONFIG_PAGE_SHIFT
        self.PAGE_SIZE = 1 << self.PAGE_SHIFT
        # Keep masks as unsigned 64-bit values (Python ints are unbounded).
        self.PAGE_MASK = (~(self.PAGE_SIZE - 1)) & ((1 << 64) - 1)

        # Dynamic (possibly KASLR-shifted) bases, read from kernel globals.
        self.PAGE_OFFSET = int(gdb.parse_and_eval("page_offset_base"))
        self.VMEMMAP_START = int(gdb.parse_and_eval("vmemmap_base"))
        self.PHYS_BASE = int(gdb.parse_and_eval("phys_base"))
        self.START_KERNEL_map = 0xffffffff80000000

        self.KERNEL_START = gdb.parse_and_eval("_text")
        self.KERNEL_END = gdb.parse_and_eval("_end")

        self.VMALLOC_START = int(gdb.parse_and_eval("vmalloc_base"))
        # vmalloc span: 32 TiB (4-level paging base) or 12800 TiB (5-level
        # paging base) — presumably per Documentation/arch/x86/x86_64/mm.rst;
        # a KASLR-randomized base falls back to the larger span.
        if self.VMALLOC_START == 0xffffc90000000000:
            self.VMALLOC_END = self.VMALLOC_START + (32 * 1024 * 1024 * 1024 * 1024) - 1
        elif self.VMALLOC_START == 0xffa0000000000000:
            self.VMALLOC_END = self.VMALLOC_START + (12800 * 1024 * 1024 * 1024 * 1024) - 1
        else:
            self.VMALLOC_END = self.VMALLOC_START + (12800 * 1024 * 1024 * 1024 * 1024) - 1

        # SPARSEMEM geometry.  NOTE(review): 46/27 match the 4-level paging
        # values in arch/x86/include/asm/sparsemem.h; 5-level kernels use
        # MAX_PHYSMEM_BITS 52 — confirm whether that case matters here.
        self.MAX_PHYSMEM_BITS = 46
        self.SECTION_SIZE_BITS = 27
        self.MAX_ORDER = 10

        self.SECTIONS_SHIFT = self.MAX_PHYSMEM_BITS - self.SECTION_SIZE_BITS
        self.NR_MEM_SECTIONS = 1 << self.SECTIONS_SHIFT
        self.PFN_SECTION_SHIFT = self.SECTION_SIZE_BITS - self.PAGE_SHIFT
        self.PAGES_PER_SECTION = 1 << self.PFN_SECTION_SHIFT
        self.PAGE_SECTION_MASK = (~(self.PAGES_PER_SECTION - 1)) & ((1 << 64) - 1)

        if constants.LX_CONFIG_SPARSEMEM_EXTREME:
            self.SECTIONS_PER_ROOT = self.PAGE_SIZE // gdb.lookup_type("struct mem_section").sizeof
        else:
            self.SECTIONS_PER_ROOT = 1

        self.NR_SECTION_ROOTS = DIV_ROUND_UP(self.NR_MEM_SECTIONS, self.SECTIONS_PER_ROOT)
        self.SECTION_ROOT_MASK = self.SECTIONS_PER_ROOT - 1

        # Prefer the enumerators from the debugged kernel's debug info;
        # fall back to hard-coded bit positions when they are unavailable.
        # NOTE(review): upstream mmzone.h puts SECTION_MARKED_PRESENT at
        # bit 0 and SECTION_HAS_MEM_MAP at bit 1 — verify the 1 << 0
        # fallback against the target kernel version.
        try:
            self.SECTION_HAS_MEM_MAP = 1 << int(gdb.parse_and_eval('SECTION_HAS_MEM_MAP_BIT'))
            self.SECTION_IS_EARLY = 1 << int(gdb.parse_and_eval('SECTION_IS_EARLY_BIT'))
        except Exception:
            # Was a bare "except:", which also swallowed KeyboardInterrupt.
            self.SECTION_HAS_MEM_MAP = 1 << 0
            self.SECTION_IS_EARLY = 1 << 3

        self.SUBSECTION_SHIFT = 21  # 2 MiB subsections
        self.PAGES_PER_SUBSECTION = 1 << (self.SUBSECTION_SHIFT - self.PAGE_SHIFT)

        if constants.LX_CONFIG_NUMA and constants.LX_CONFIG_NODES_SHIFT:
            self.NODE_SHIFT = constants.LX_CONFIG_NODES_SHIFT
        else:
            self.NODE_SHIFT = 0

        self.MAX_NUMNODES = 1 << self.NODE_SHIFT

        # Base of the virtual memmap: vmemmap[pfn] is pfn's struct page.
        self.vmemmap = gdb.Value(self.VMEMMAP_START).cast(utils.get_page_type().pointer())

    def kasan_reset_tag(self, addr):
        # x86 carries no KASAN tag bits in pointers; identity by design.
        return addr

    def SECTION_NR_TO_ROOT(self, sec):
        return sec // self.SECTIONS_PER_ROOT

    def __nr_to_section(self, nr):
        # Index the two-level mem_section[root][offset] table.
        root = self.SECTION_NR_TO_ROOT(nr)
        mem_section = gdb.parse_and_eval("mem_section")
        return mem_section[root][nr & self.SECTION_ROOT_MASK]

    def pfn_to_section_nr(self, pfn):
        return pfn >> self.PFN_SECTION_SHIFT

    def section_nr_to_pfn(self, sec):
        return sec << self.PFN_SECTION_SHIFT

    def __pfn_to_section(self, pfn):
        return self.__nr_to_section(self.pfn_to_section_nr(pfn))

    def pfn_to_section(self, pfn):
        return self.__pfn_to_section(pfn)

    def subsection_map_index(self, pfn):
        return (pfn & ~(self.PAGE_SECTION_MASK)) // self.PAGES_PER_SUBSECTION

    def pfn_section_valid(self, ms, pfn):
        if constants.LX_CONFIG_SPARSEMEM_VMEMMAP:
            idx = self.subsection_map_index(pfn)
            return test_bit(idx, ms['usage']['subsection_map'])
        else:
            return True

    def valid_section(self, mem_section):
        # "is not None" instead of "!= None": avoids routing the comparison
        # through gdb.Value's rich-comparison machinery.
        if mem_section is not None and (mem_section['section_mem_map'] & self.SECTION_HAS_MEM_MAP):
            return True
        return False

    def early_section(self, mem_section):
        if mem_section is not None and (mem_section['section_mem_map'] & self.SECTION_IS_EARLY):
            return True
        return False

    def pfn_valid(self, pfn):
        # Reject pfns that overflow the address width or the section table.
        if self.PHYS_PFN(self.PFN_PHYS(pfn)) != pfn:
            return False
        if self.pfn_to_section_nr(pfn) >= self.NR_MEM_SECTIONS:
            return False
        ms = self.__pfn_to_section(pfn)

        if not self.valid_section(ms):
            return False
        return self.early_section(ms) or self.pfn_section_valid(ms, pfn)

    def PFN_PHYS(self, pfn):
        return pfn << self.PAGE_SHIFT

    def PHYS_PFN(self, phys):
        return phys >> self.PAGE_SHIFT

    def __phys_to_virt(self, pa):
        return pa + self.PAGE_OFFSET

    def __virt_to_phys(self, va):
        # Kernel-text addresses live above START_KERNEL_map; everything
        # else is assumed to be in the direct map.
        if va >= self.START_KERNEL_map:
            return va - self.START_KERNEL_map + self.PHYS_BASE
        else:
            return va - self.PAGE_OFFSET

    def virt_to_phys(self, va):
        return self.__virt_to_phys(va)

    def virt_to_page(self, va):
        return self.pfn_to_page(self.virt_to_pfn(va))

    def __pa(self, va):
        return self.__virt_to_phys(va)

    def __va(self, pa):
        return self.__phys_to_virt(pa)

    def pfn_to_kaddr(self, pfn):
        return self.__va(pfn << self.PAGE_SHIFT)

    def virt_to_pfn(self, va):
        return self.PHYS_PFN(self.__virt_to_phys(va))

    def sym_to_pfn(self, x):
        return self.PHYS_PFN(self.__virt_to_phys(x))

    def page_to_pfn(self, page):
        # Pointer arithmetic against the vmemmap base yields the pfn.
        return int(page.cast(utils.get_page_type().pointer()) - self.vmemmap)

    def pfn_to_page(self, pfn):
        return self.vmemmap + pfn

    def page_to_phys(self, page):
        return self.PFN_PHYS(self.page_to_pfn(page))

    def page_to_virt(self, page):
        return self.__va(self.page_to_phys(page))

    def page_address(self, page):
        return self.page_to_virt(page)

    def folio_address(self, folio):
        return self.page_address(folio['page'].address)