Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'kvm-s390-next-7.1-1' of https://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

- ESA nesting support
- 4k memslots
- LPSW/E fix

+102 -73
+8
Documentation/virt/kvm/api.rst
···
  depending on which executed at the time of an exit. Userspace must
  take care to differentiate between these cases.

+ 8.47 KVM_CAP_S390_VSIE_ESAMODE
+ ------------------------------
+
+ :Architectures: s390
+
+ The presence of this capability indicates that the nested KVM guest can
+ start in ESA mode.
+
  9. Known KVM API problems
  =========================
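As a reading aid: a minimal userspace sketch of how a VMM might probe and enable the new capability, using the generic KVM_CHECK_EXTENSION / KVM_ENABLE_CAP flow. The helper name and the trimmed error handling are illustrative, not part of this series.

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Illustrative helper: kvm_fd is the /dev/kvm fd, vm_fd the VM fd. */
    static int enable_vsie_esamode(int kvm_fd, int vm_fd)
    {
        struct kvm_enable_cap cap;

        /* Probe first; kernels without this series report 0 here. */
        if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_VSIE_ESAMODE) <= 0)
            return -1;

        memset(&cap, 0, sizeof(cap));
        cap.cap = KVM_CAP_S390_VSIE_ESAMODE;
        return ioctl(vm_fd, KVM_ENABLE_CAP, &cap); /* 0 on success */
    }

Once enabled, the vSIE code below stops rejecting nested guests whose SIE block lacks CPUSTAT_ZARCH.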
+1
arch/s390/include/asm/kvm_host.h
···
      int user_stsi;
      int user_instr0;
      int user_operexec;
+     int allow_vsie_esamode;
      struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
      wait_queue_head_t ipte_wq;
      int ipte_lock_count;
+1 -2
arch/s390/include/asm/kvm_host_types.h
···
  struct kvm_s390_sie_block {
      atomic_t cpuflags;       /* 0x0000 */
      __u32 : 1;               /* 0x0004 */
-     __u32 prefix : 18;
-     __u32 : 1;
+     __u32 prefix : 19;
      __u32 ibc : 12;
      __u8 reserved08[4];      /* 0x0008 */
  #define PROG_IN_SIE (1<<0)
+5
arch/s390/kvm/dat.h
···
      } tok;
  };

+ #define _SEGMENT_FR_MASK   (_SEGMENT_MASK >> PAGE_SHIFT)
+ #define _REGION3_FR_MASK   (_REGION3_MASK >> PAGE_SHIFT)
+ #define _PAGES_PER_SEGMENT _PAGE_ENTRIES
+ #define _PAGES_PER_REGION3 (_PAGES_PER_SEGMENT * _CRST_ENTRIES)
+
  /* Soft dirty, needed as macro for atomic operations on ptes */
  #define _PAGE_SD 0x002
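A note on the arithmetic these macros encode. The geometry values below are the usual s390 ones (4K pages, 1M segments, 2G region-3 tables) and are stated as assumptions rather than taken from the patch:

    /*
     * Assuming PAGE_SHIFT = 12, _SEGMENT_SIZE = 1M, _REGION3_SIZE = 2G,
     * _PAGE_ENTRIES = 256 and _CRST_ENTRIES = 2048:
     *
     *   _SEGMENT_FR_MASK = _SEGMENT_MASK >> 12  clears the low 8 bits of a
     *       gfn, rounding it down to a 256-page (1M) frame;
     *   _REGION3_FR_MASK = _REGION3_MASK >> 12  clears the low 19 bits,
     *       rounding down to a 524288-page (2G) frame;
     *   _PAGES_PER_SEGMENT = 256                  (1M / 4K)
     *   _PAGES_PER_REGION3 = 256 * 2048 = 524288  (2G / 4K)
     */

These frame-number (gfn/pfn) variants let gaccess.c and gmap.c below mask gfns directly instead of converting through gpa_to_gfn().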
+1 -1
arch/s390/kvm/faultin.c
···
  scoped_guard(read_lock, &kvm->mmu_lock) {
      if (!mmu_invalidate_retry_gfn(kvm, inv_seq, f->gfn)) {
          f->valid = true;
-         rc = gmap_link(mc, kvm->arch.gmap, f);
+         rc = gmap_link(mc, kvm->arch.gmap, f, slot);
          kvm_release_faultin_page(kvm, f->page, !!rc, f->write_attempt);
          f->page = NULL;
      }
+1 -1
arch/s390/kvm/gaccess.c
···
  lockdep_assert_held(&sg->kvm->mmu_lock);
  lockdep_assert_held(&sg->parent->children_lock);

- gfn = f->gfn & gpa_to_gfn(is_pmd(*table) ? _SEGMENT_MASK : _REGION3_MASK);
+ gfn = f->gfn & (is_pmd(*table) ? _SEGMENT_FR_MASK : _REGION3_FR_MASK);
  scoped_guard(spinlock, &sg->host_to_rmap_lock)
      rc = gmap_insert_rmap(sg, gfn, gpa_to_gfn(raddr), host->h.tt);
  if (rc)
+26 -6
arch/s390/kvm/gmap.c
···
      return rc;
  }

- static inline bool gmap_2g_allowed(struct gmap *gmap, gfn_t gfn)
+ static inline bool gmap_2g_allowed(struct gmap *gmap, struct guest_fault *f,
+                                    struct kvm_memory_slot *slot)
  {
      return false;
  }

- static inline bool gmap_1m_allowed(struct gmap *gmap, gfn_t gfn)
+ /**
+  * gmap_1m_allowed() - Check whether a 1M hugepage is allowed.
+  * @gmap: The gmap of the guest.
+  * @f: Describes the fault that is being resolved.
+  * @slot: The memslot the faulting address belongs to.
+  *
+  * The function checks whether the GMAP_FLAG_ALLOW_HPAGE_1M flag is set for
+  * @gmap, whether the offset of the address in the 1M virtual frame is the
+  * same as the offset in the physical 1M frame, and finally whether the whole
+  * 1M page would fit in the given memslot.
+  *
+  * Return: true if a 1M hugepage is allowed to back the faulting address,
+  * false otherwise.
+  */
+ static inline bool gmap_1m_allowed(struct gmap *gmap, struct guest_fault *f,
+                                    struct kvm_memory_slot *slot)
  {
-     return test_bit(GMAP_FLAG_ALLOW_HPAGE_1M, &gmap->flags);
+     return test_bit(GMAP_FLAG_ALLOW_HPAGE_1M, &gmap->flags) &&
+            !((f->gfn ^ f->pfn) & ~_SEGMENT_FR_MASK) &&
+            slot->base_gfn <= ALIGN_DOWN(f->gfn, _PAGES_PER_SEGMENT) &&
+            slot->base_gfn + slot->npages >= ALIGN(f->gfn + 1, _PAGES_PER_SEGMENT);
  }

  static int _gmap_link(struct kvm_s390_mmu_cache *mc, struct gmap *gmap, int level,
···
      return rc;
  }

- int gmap_link(struct kvm_s390_mmu_cache *mc, struct gmap *gmap, struct guest_fault *f)
+ int gmap_link(struct kvm_s390_mmu_cache *mc, struct gmap *gmap, struct guest_fault *f,
+               struct kvm_memory_slot *slot)
  {
      unsigned int order;
      int level;
···
      level = TABLE_TYPE_PAGE_TABLE;
      if (f->page) {
          order = folio_order(page_folio(f->page));
-         if (order >= get_order(_REGION3_SIZE) && gmap_2g_allowed(gmap, f->gfn))
+         if (order >= get_order(_REGION3_SIZE) && gmap_2g_allowed(gmap, f, slot))
              level = TABLE_TYPE_REGION3;
-         else if (order >= get_order(_SEGMENT_SIZE) && gmap_1m_allowed(gmap, f->gfn))
+         else if (order >= get_order(_SEGMENT_SIZE) && gmap_1m_allowed(gmap, f, slot))
              level = TABLE_TYPE_SEGMENT;
      }
      return _gmap_link(mc, gmap, level, f);
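The kernel-doc above names three conditions; a worked example with made-up numbers (256 4K pages per 1M segment) may help:

    /*
     * f->gfn = 0x10042, f->pfn = 0x7f342:
     *   (0x10042 ^ 0x7f342) & ~_SEGMENT_FR_MASK == 0x42 ^ 0x42 == 0
     *       -> guest and host offsets inside the 1M frame agree, so a single
     *          1M mapping translates every page of the segment correctly;
     *
     *   ALIGN_DOWN(0x10042, 256) = 0x10000, ALIGN(0x10043, 256) = 0x10100
     *       -> the memslot must fully cover gfns [0x10000, 0x10100),
     *          otherwise a 1M mapping would reach past the slot boundary.
     */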
+2 -1
arch/s390/kvm/gmap.h
···
  struct gmap *gmap_new_child(struct gmap *parent, gfn_t limit);
  void gmap_remove_child(struct gmap *child);
  void gmap_dispose(struct gmap *gmap);
- int gmap_link(struct kvm_s390_mmu_cache *mc, struct gmap *gmap, struct guest_fault *fault);
+ int gmap_link(struct kvm_s390_mmu_cache *mc, struct gmap *gmap, struct guest_fault *fault,
+               struct kvm_memory_slot *slot);
  void gmap_sync_dirty_log(struct gmap *gmap, gfn_t start, gfn_t end);
  int gmap_set_limit(struct gmap *gmap, gfn_t limit);
  int gmap_ucas_translate(struct kvm_s390_mmu_cache *mc, struct gmap *gmap, gpa_t *gaddr);
+3
arch/s390/kvm/interrupt.c
···
  set_bit(IRQ_PEND_EXT_SERVICE, &fi->masked_irqs);
  spin_unlock(&fi->lock);

+ if (!ext.ext_params)
+     return 0;
+
  VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
             ext.ext_params);
  vcpu->stat.deliver_service_signal++;
+13 -15
arch/s390/kvm/kvm-s390.c
···
  case KVM_CAP_IRQFD_RESAMPLE:
  case KVM_CAP_S390_USER_OPEREXEC:
  case KVM_CAP_S390_KEYOP:
+ case KVM_CAP_S390_VSIE_ESAMODE:
      r = 1;
      break;
  case KVM_CAP_SET_GUEST_DEBUG2:
···
      VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_OPEREXEC");
      kvm->arch.user_operexec = 1;
      icpt_operexc_on_all_vcpus(kvm);
+     r = 0;
+     break;
+ case KVM_CAP_S390_VSIE_ESAMODE:
+     VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_VSIE_ESAMODE");
+     kvm->arch.allow_vsie_esamode = 1;
      r = 0;
      break;
  default:
···
                  struct kvm_memory_slot *new,
                  enum kvm_mr_change change)
  {
-     gpa_t size;
-
-     if (kvm_is_ucontrol(kvm) && new->id < KVM_USER_MEM_SLOTS)
+     if (kvm_is_ucontrol(kvm) && new && new->id < KVM_USER_MEM_SLOTS)
          return -EINVAL;

      /* When we are protected, we should not change the memory slots */
···
      if (change != KVM_MR_DELETE && change != KVM_MR_FLAGS_ONLY) {
          /*
-          * A few sanity checks. We can have memory slots which have to be
-          * located/ended at a segment boundary (1MB). The memory in userland is
-          * ok to be fragmented into various different vmas. It is okay to mmap()
-          * and munmap() stuff in this slot after doing this call at any time
+          * A few sanity checks. The memory in userland is ok to be
+          * fragmented into various different vmas. It is okay to mmap()
+          * and munmap() stuff in this slot after doing this call at any
+          * time.
           */
-
-         if (new->userspace_addr & 0xffffful)
+         if (new->userspace_addr & ~PAGE_MASK)
              return -EINVAL;
-
-         size = new->npages * PAGE_SIZE;
-         if (size & 0xffffful)
-             return -EINVAL;
-
-         if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)
+         if ((new->base_gfn + new->npages) * PAGE_SIZE > kvm->arch.mem_limit)
              return -EINVAL;
      }
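The relaxed prepare-time checks implement the "4k memslots" item from the merge description: the historical 1M alignment requirement on slot address and size is gone. An illustrative before/after with made-up values:

    /*
     * userspace_addr = 0x7f0000101000, npages = 0x180 (1.5M):
     *   old code: rejected, address and size both fail the '& 0xffffful'
     *             1M-alignment tests;
     *   new code: accepted, since the address is page aligned and
     *             (base_gfn + npages) * PAGE_SIZE stays below mem_limit.
     */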
+4 -1
arch/s390/kvm/kvm-s390.h
···
  #endif
  }

- #define GUEST_PREFIX_SHIFT 13
+ #define GUEST_PREFIX_SHIFT 12
+ #define GUEST_PREFIX_MASK_ZARCH 0x7fffe
+ #define GUEST_PREFIX_MASK_ESA 0x7ffff
  static inline u32 kvm_s390_get_prefix(struct kvm_vcpu *vcpu)
  {
      return vcpu->arch.sie_block->prefix << GUEST_PREFIX_SHIFT;
···
      VCPU_EVENT(vcpu, 3, "set prefix of cpu %03u to 0x%x", vcpu->vcpu_id,
                 prefix);
      vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT;
+     vcpu->arch.sie_block->prefix &= GUEST_PREFIX_MASK_ZARCH;
      kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
      kvm_make_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
  }
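The shift change pairs with the widened 19-bit prefix field in kvm_host_types.h above. Illustrative arithmetic (the 8K vs. 4K prefix alignment follows the architecture; the concrete addresses are made up):

    /*
     * z/Arch guest, prefix 0x3e000: 0x3e000 >> 12 = 0x3e;
     *     0x3e & GUEST_PREFIX_MASK_ZARCH (0x7fffe) = 0x3e
     *     (bit 0 forced clear keeps the 8K alignment z/Architecture needs)
     *
     * ESA guest, prefix 0x3f000: 0x3f000 >> 12 = 0x3f;
     *     0x3f & GUEST_PREFIX_MASK_ESA (0x7ffff) = 0x3f
     *     (ESA mode needs only 4K alignment, which is why the field gained
     *     an extra bit)
     */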
+6 -2
arch/s390/kvm/priv.c
···
  {
      psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
      psw32_t new_psw;
-     u64 addr;
+     u64 addr, iaddr;
      int rc;
      u8 ar;

      vcpu->stat.instruction_lpsw++;

+     iaddr = gpsw->addr - kvm_s390_get_ilen(vcpu);
      if (gpsw->mask & PSW_MASK_PSTATE)
          return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
···
      gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
      if (!is_valid_psw(gpsw))
          return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+     vcpu->arch.sie_block->gbea = iaddr;
      return 0;
  }

  static int handle_lpswe(struct kvm_vcpu *vcpu)
  {
      psw_t new_psw;
-     u64 addr;
+     u64 addr, iaddr;
      int rc;
      u8 ar;

      vcpu->stat.instruction_lpswe++;

+     iaddr = vcpu->arch.sie_block->gpsw.addr - kvm_s390_get_ilen(vcpu);
      if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
          return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
···
      vcpu->arch.sie_block->gpsw = new_psw;
      if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
          return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+     vcpu->arch.sie_block->gbea = iaddr;
      return 0;
  }
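This is the "LPSW/E fix" from the merge description: before loading the new PSW, both handlers now remember the address of the PSW-loading instruction and, once the new PSW validates, store it in gbea. A sketch with made-up addresses:

    /*
     * LPSW at guest address 0x1000 (ilen = 4) intercepts with gpsw->addr
     * already advanced to 0x1004; the handler rewinds:
     *
     *     iaddr = 0x1004 - kvm_s390_get_ilen(vcpu) = 0x1000
     *
     * so the guest's breaking-event-address register ends up pointing at
     * the instruction that broke sequential execution, as it would on
     * hardware.
     */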
+25 -4
arch/s390/kvm/vsie.c
···
  struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
  int newflags, cpuflags = atomic_read(&scb_o->cpuflags);

- /* we don't allow ESA/390 guests */
- if (!(cpuflags & CPUSTAT_ZARCH))
+ /* we don't allow ESA/390 guests unless explicitly enabled */
+ if (!(cpuflags & CPUSTAT_ZARCH) && !vcpu->kvm->arch.allow_vsie_esamode)
      return set_validity_icpt(scb_s, 0x0001U);

  if (cpuflags & (CPUSTAT_RRF | CPUSTAT_MCDS))
···
      return set_validity_icpt(scb_s, 0x0007U);

  /* intervention requests will be set later */
- newflags = CPUSTAT_ZARCH;
+ newflags = 0;
+ if (cpuflags & CPUSTAT_ZARCH)
+     newflags = CPUSTAT_ZARCH;
  if (cpuflags & CPUSTAT_GED && test_kvm_facility(vcpu->kvm, 8))
      newflags |= CPUSTAT_GED;
  if (cpuflags & CPUSTAT_GED2 && test_kvm_facility(vcpu->kvm, 78)) {
···
      return 0;
  }

+ static void shadow_esa(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+ {
+     struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
+
+     /* Ensure these bits are indeed turned off */
+     scb_s->eca &= ~ECA_VX;
+     scb_s->ecb &= ~(ECB_GS | ECB_TE);
+     scb_s->ecb3 &= ~ECB3_RI;
+     scb_s->ecd &= ~ECD_HOSTREGMGMT;
+ }
+
  /* shadow (round up/down) the ibc to avoid validity icpt */
  static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
  {
···
  struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
  /* READ_ONCE does not work on bitfields - use a temporary variable */
  const uint32_t __new_prefix = scb_o->prefix;
- const uint32_t new_prefix = READ_ONCE(__new_prefix);
+ uint32_t new_prefix = READ_ONCE(__new_prefix);
  const bool wants_tx = READ_ONCE(scb_o->ecb) & ECB_TE;
  bool had_tx = scb_s->ecb & ECB_TE;
  unsigned long new_mso = 0;
···
      scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

  scb_s->icpua = scb_o->icpua;
+
+ if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_ZARCH))
+     new_prefix &= GUEST_PREFIX_MASK_ESA;
+ else
+     new_prefix &= GUEST_PREFIX_MASK_ZARCH;

  if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM))
      new_mso = READ_ONCE(scb_o->mso) & 0xfffffffffff00000UL;
···
  scb_s->hpid = HPID_VSIE;
  scb_s->cpnc = scb_o->cpnc;
+
+ if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_ZARCH))
+     shadow_esa(vcpu, vsie_page);

  prepare_ibc(vcpu, vsie_page);
  rc = shadow_crycb(vcpu, vsie_page);
+1
include/uapi/linux/kvm.h
···
  #define KVM_CAP_ARM_SEA_TO_USER 245
  #define KVM_CAP_S390_USER_OPEREXEC 246
  #define KVM_CAP_S390_KEYOP 247
+ #define KVM_CAP_S390_VSIE_ESAMODE 248

  struct kvm_irq_routing_irqchip {
      __u32 irqchip;
+3 -6
tools/testing/selftests/kvm/Makefile.kvm
···
  TEST_GEN_PROGS_COMMON += kvm_create_max_vcpus
  TEST_GEN_PROGS_COMMON += kvm_page_table_test
  TEST_GEN_PROGS_COMMON += set_memory_region_test
+ TEST_GEN_PROGS_COMMON += memslot_modification_stress_test
+ TEST_GEN_PROGS_COMMON += memslot_perf_test

  # Compiled test targets
  TEST_GEN_PROGS_x86 = $(TEST_GEN_PROGS_COMMON)
···
  TEST_GEN_PROGS_x86 += dirty_log_perf_test
  TEST_GEN_PROGS_x86 += guest_memfd_test
  TEST_GEN_PROGS_x86 += hardware_disable_test
- TEST_GEN_PROGS_x86 += memslot_modification_stress_test
- TEST_GEN_PROGS_x86 += memslot_perf_test
  TEST_GEN_PROGS_x86 += mmu_stress_test
  TEST_GEN_PROGS_x86 += rseq_test
  TEST_GEN_PROGS_x86 += steal_time
···
  TEST_GEN_PROGS_arm64 += dirty_log_perf_test
  TEST_GEN_PROGS_arm64 += get-reg-list
  TEST_GEN_PROGS_arm64 += guest_memfd_test
- TEST_GEN_PROGS_arm64 += memslot_modification_stress_test
- TEST_GEN_PROGS_arm64 += memslot_perf_test
  TEST_GEN_PROGS_arm64 += mmu_stress_test
  TEST_GEN_PROGS_arm64 += rseq_test
  TEST_GEN_PROGS_arm64 += steal_time
···
  TEST_GEN_PROGS_s390 += s390/keyop
  TEST_GEN_PROGS_s390 += rseq_test
  TEST_GEN_PROGS_s390 += s390/irq_routing
+ TEST_GEN_PROGS_s390 += mmu_stress_test

  TEST_GEN_PROGS_riscv = $(TEST_GEN_PROGS_COMMON)
  TEST_GEN_PROGS_riscv += riscv/sbi_pmu_test
···
  TEST_GEN_PROGS_riscv += coalesced_io_test
  TEST_GEN_PROGS_riscv += dirty_log_perf_test
  TEST_GEN_PROGS_riscv += get-reg-list
- TEST_GEN_PROGS_riscv += memslot_modification_stress_test
- TEST_GEN_PROGS_riscv += memslot_perf_test
  TEST_GEN_PROGS_riscv += mmu_stress_test
  TEST_GEN_PROGS_riscv += rseq_test
  TEST_GEN_PROGS_riscv += steal_time
-3
tools/testing/selftests/kvm/dirty_log_test.c
···
  }

  #ifdef __s390x__
-     /* Align to 1M (segment size) */
-     guest_test_phys_mem = align_down(guest_test_phys_mem, 1 << 20);
-
      /*
       * The workaround in guest_code() to write all pages prior to the first
       * iteration isn't compatible with the dirty ring, as the dirty ring
-4
tools/testing/selftests/kvm/include/kvm_util.h
···
  {
      unsigned int n;
      n = vm_num_guest_pages(mode, vm_num_host_pages(mode, num_guest_pages));
- #ifdef __s390x__
-     /* s390 requires 1M aligned guest sizes */
-     n = (n + 255) & ~255;
- #endif
      return n;
  }
-3
tools/testing/selftests/kvm/kvm_page_table_test.c
···
          guest_page_size;
  else
      guest_test_phys_mem = p->phys_offset;
- #ifdef __s390x__
-     alignment = max(0x100000UL, alignment);
- #endif
  guest_test_phys_mem = align_down(guest_test_phys_mem, alignment);

  /* Set up the shared data structure test_args */
+1 -8
tools/testing/selftests/kvm/lib/kvm_util.c
···
  struct userspace_mem_region *region;
  size_t backing_src_pagesz = get_backing_src_pagesz(src_type);
  size_t mem_size = npages * vm->page_size;
- size_t alignment;
+ size_t alignment = 1;

  TEST_REQUIRE_SET_USER_MEMORY_REGION2();
···
  region = calloc(1, sizeof(*region));
  TEST_ASSERT(region != NULL, "Insufficient Memory");
  region->mmap_size = mem_size;
-
- #ifdef __s390x__
-     /* On s390x, the host address must be aligned to 1M (due to PGSTEs) */
-     alignment = 0x100000;
- #else
-     alignment = 1;
- #endif

  /*
   * When using THP mmap is not guaranteed to returned a hugepage aligned
-4
tools/testing/selftests/kvm/lib/memstress.c
···

  args->gpa = (region_end_gfn - guest_num_pages - 1) * args->guest_page_size;
  args->gpa = align_down(args->gpa, backing_src_pagesz);
- #ifdef __s390x__
-     /* Align to 1M (segment size) */
-     args->gpa = align_down(args->gpa, 1 << 20);
- #endif
  args->size = guest_num_pages * args->guest_page_size;
  pr_info("guest physical test memory: [0x%lx, 0x%lx)\n",
          args->gpa, args->gpa + args->size);
-4
tools/testing/selftests/kvm/pre_fault_memory_test.c
···

  alignment = guest_page_size = vm_guest_mode_params[VM_MODE_DEFAULT].page_size;
  gpa = (vm->max_gfn - TEST_NPAGES) * guest_page_size;
- #ifdef __s390x__
-     alignment = max(0x100000UL, guest_page_size);
- #else
  alignment = SZ_2M;
- #endif
  gpa = align_down(gpa, alignment);
  gva = gpa & ((1ULL << (vm->va_bits - 1)) - 1);
+1 -8
tools/testing/selftests/kvm/set_memory_region_test.c
···
  uint32_t max_mem_slots;
  uint32_t slot;
  void *mem, *mem_aligned, *mem_extra;
- size_t alignment;
-
- #ifdef __s390x__
-     /* On s390x, the host address must be aligned to 1M (due to PGSTEs) */
-     alignment = 0x100000;
- #else
-     alignment = 1;
- #endif
+ size_t alignment = 1;

  max_mem_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS);
  TEST_ASSERT(max_mem_slots > 0,