Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * KVM selftest s390x library code - CPU-related functions (page tables...)
4 *
5 * Copyright (C) 2019, Red Hat, Inc.
6 */
7
8#include "processor.h"
9#include "kvm_util.h"
10
11#define PAGES_PER_REGION 4
12
13void virt_arch_pgd_alloc(struct kvm_vm *vm)
14{
15 gpa_t gpa;
16
17 TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x",
18 vm->page_size);
19
20 if (vm->mmu.pgd_created)
21 return;
22
23 gpa = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
24 KVM_GUEST_PAGE_TABLE_MIN_PADDR,
25 vm->memslots[MEM_REGION_PT]);
26 memset(addr_gpa2hva(vm, gpa), 0xff, PAGES_PER_REGION * vm->page_size);
27
28 vm->mmu.pgd = gpa;
29 vm->mmu.pgd_created = true;
30}
31
32/*
33 * Allocate 4 pages for a region/segment table (ri < 4), or one page for
34 * a page table (ri == 4). Returns a suitable region/segment table entry
35 * which points to the freshly allocated pages.
36 */
37static u64 virt_alloc_region(struct kvm_vm *vm, int ri)
38{
39 u64 taddr;
40
41 taddr = vm_phy_pages_alloc(vm, ri < 4 ? PAGES_PER_REGION : 1,
42 KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
43 memset(addr_gpa2hva(vm, taddr), 0xff, PAGES_PER_REGION * vm->page_size);
44
45 return (taddr & REGION_ENTRY_ORIGIN)
46 | (((4 - ri) << 2) & REGION_ENTRY_TYPE)
47 | ((ri < 4 ? (PAGES_PER_REGION - 1) : 0) & REGION_ENTRY_LENGTH);
48}
49
50void virt_arch_pg_map(struct kvm_vm *vm, gva_t gva, gpa_t gpa)
51{
52 int ri, idx;
53 u64 *entry;
54
55 TEST_ASSERT((gva % vm->page_size) == 0,
56 "Virtual address not on page boundary,\n"
57 " gva: 0x%lx vm->page_size: 0x%x",
58 gva, vm->page_size);
59 TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (gva >> vm->page_shift)),
60 "Invalid virtual address, gva: 0x%lx", gva);
61 TEST_ASSERT((gpa % vm->page_size) == 0,
62 "Physical address not on page boundary,\n"
63 " gpa: 0x%lx vm->page_size: 0x%x",
64 gva, vm->page_size);
65 TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
66 "Physical address beyond beyond maximum supported,\n"
67 " gpa: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
68 gva, vm->max_gfn, vm->page_size);
69
70 /* Walk through region and segment tables */
71 entry = addr_gpa2hva(vm, vm->mmu.pgd);
72 for (ri = 1; ri <= 4; ri++) {
73 idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
74 if (entry[idx] & REGION_ENTRY_INVALID)
75 entry[idx] = virt_alloc_region(vm, ri);
76 entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
77 }
78
79 /* Fill in page table entry */
80 idx = (gva >> PAGE_SHIFT) & 0x0ffu; /* page index */
81 if (!(entry[idx] & PAGE_INVALID))
82 fprintf(stderr,
83 "WARNING: PTE for gpa=0x%"PRIx64" already set!\n", gpa);
84 entry[idx] = gpa;
85}
86
87gpa_t addr_arch_gva2gpa(struct kvm_vm *vm, gva_t gva)
88{
89 int ri, idx;
90 u64 *entry;
91
92 TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x",
93 vm->page_size);
94
95 entry = addr_gpa2hva(vm, vm->mmu.pgd);
96 for (ri = 1; ri <= 4; ri++) {
97 idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
98 TEST_ASSERT(!(entry[idx] & REGION_ENTRY_INVALID),
99 "No region mapping for vm virtual address 0x%lx",
100 gva);
101 entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
102 }
103
104 idx = (gva >> PAGE_SHIFT) & 0x0ffu; /* page index */
105
106 TEST_ASSERT(!(entry[idx] & PAGE_INVALID),
107 "No page mapping for vm virtual address 0x%lx", gva);
108
109 return (entry[idx] & ~0xffful) + (gva & 0xffful);
110}
111
112static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, u8 indent,
113 u64 ptea_start)
114{
115 u64 *pte, ptea;
116
117 for (ptea = ptea_start; ptea < ptea_start + 0x100 * 8; ptea += 8) {
118 pte = addr_gpa2hva(vm, ptea);
119 if (*pte & PAGE_INVALID)
120 continue;
121 fprintf(stream, "%*spte @ 0x%lx: 0x%016lx\n",
122 indent, "", ptea, *pte);
123 }
124}
125
126static void virt_dump_region(FILE *stream, struct kvm_vm *vm, u8 indent,
127 u64 reg_tab_addr)
128{
129 u64 addr, *entry;
130
131 for (addr = reg_tab_addr; addr < reg_tab_addr + 0x400 * 8; addr += 8) {
132 entry = addr_gpa2hva(vm, addr);
133 if (*entry & REGION_ENTRY_INVALID)
134 continue;
135 fprintf(stream, "%*srt%lde @ 0x%lx: 0x%016lx\n",
136 indent, "", 4 - ((*entry & REGION_ENTRY_TYPE) >> 2),
137 addr, *entry);
138 if (*entry & REGION_ENTRY_TYPE) {
139 virt_dump_region(stream, vm, indent + 2,
140 *entry & REGION_ENTRY_ORIGIN);
141 } else {
142 virt_dump_ptes(stream, vm, indent + 2,
143 *entry & REGION_ENTRY_ORIGIN);
144 }
145 }
146}
147
148void virt_arch_dump(FILE *stream, struct kvm_vm *vm, u8 indent)
149{
150 if (!vm->mmu.pgd_created)
151 return;
152
153 virt_dump_region(stream, vm, indent, vm->mmu.pgd);
154}
155
/* Set the vCPU's entry point by pointing its PSW address at @guest_code. */
void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
{
	vcpu->run->psw_addr = (uintptr_t)guest_code;
}
160
161struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, u32 vcpu_id)
162{
163 size_t stack_size = DEFAULT_STACK_PGS * getpagesize();
164 u64 stack_gva;
165 struct kvm_regs regs;
166 struct kvm_sregs sregs;
167 struct kvm_vcpu *vcpu;
168
169 TEST_ASSERT(vm->page_size == PAGE_SIZE, "Unsupported page size: 0x%x",
170 vm->page_size);
171
172 stack_gva = __vm_alloc(vm, stack_size, DEFAULT_GUEST_STACK_VADDR_MIN,
173 MEM_REGION_DATA);
174
175 vcpu = __vm_vcpu_add(vm, vcpu_id);
176
177 /* Setup guest registers */
178 vcpu_regs_get(vcpu, ®s);
179 regs.gprs[15] = stack_gva + (DEFAULT_STACK_PGS * getpagesize()) - 160;
180 vcpu_regs_set(vcpu, ®s);
181
182 vcpu_sregs_get(vcpu, &sregs);
183 sregs.crs[0] |= 0x00040000; /* Enable floating point regs */
184 sregs.crs[1] = vm->mmu.pgd | 0xf; /* Primary region table */
185 vcpu_sregs_set(vcpu, &sregs);
186
187 vcpu->run->psw_mask = 0x0400000180000000ULL; /* DAT enabled + 64 bit mode */
188
189 return vcpu;
190}
191
192void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
193{
194 va_list ap;
195 struct kvm_regs regs;
196 int i;
197
198 TEST_ASSERT(num >= 1 && num <= 5, "Unsupported number of args,\n"
199 " num: %u",
200 num);
201
202 va_start(ap, num);
203 vcpu_regs_get(vcpu, ®s);
204
205 for (i = 0; i < num; i++)
206 regs.gprs[i + 2] = va_arg(ap, u64);
207
208 vcpu_regs_set(vcpu, ®s);
209 va_end(ap);
210}
211
/* Print the vCPU's PSW (mask:address) to @stream at the given indent. */
void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, u8 indent)
{
	fprintf(stream, "%*spstate: psw: 0x%.16llx:0x%.16llx\n",
		indent, "", vcpu->run->psw_mask, vcpu->run->psw_addr);
}
217
/* Intentionally empty: no unhandled-exception detection is implemented here. */
void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
}
221
/* Unconditionally report that a default in-kernel irqchip is available. */
bool kvm_arch_has_default_irqchip(void)
{
	return true;
}