Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 fixes from Martin Schwidefsky:

- four patches to get the new cputime code in shape for s390

- add the new statx system call

- a few bug fixes

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
s390: wire up statx system call
KVM: s390: Fix guest migration for huge guests resulting in panic
s390/ipl: always use load normal for CCW-type re-IPL
s390/timex: micro optimization for tod_to_ns
s390/cputime: provide architecture specific cputime_to_nsecs
s390/cputime: reset all accounting fields on fork
s390/cputime: remove last traces of cputime_t
s390: fix in-kernel program checks
s390/crypt: fix missing unlock in ctr_paes_crypt on error path

+51 -29
+4 -1
arch/s390/crypto/paes_s390.c
··· 474 474 ret = blkcipher_walk_done(desc, walk, nbytes - n); 475 475 } 476 476 if (k < n) { 477 - if (__ctr_paes_set_key(ctx) != 0) 477 + if (__ctr_paes_set_key(ctx) != 0) { 478 + if (locked) 479 + spin_unlock(&ctrblk_lock); 478 480 return blkcipher_walk_done(desc, walk, -EIO); 481 + } 479 482 } 480 483 } 481 484 if (locked)
+8 -12
arch/s390/include/asm/cputime.h
··· 8 8 #define _S390_CPUTIME_H 9 9 10 10 #include <linux/types.h> 11 - #include <asm/div64.h> 11 + #include <asm/timex.h> 12 12 13 13 #define CPUTIME_PER_USEC 4096ULL 14 14 #define CPUTIME_PER_SEC (CPUTIME_PER_USEC * USEC_PER_SEC) 15 15 16 16 /* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */ 17 17 18 - typedef unsigned long long __nocast cputime_t; 19 - typedef unsigned long long __nocast cputime64_t; 20 - 21 18 #define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new) 22 19 23 - static inline unsigned long __div(unsigned long long n, unsigned long base) 20 + /* 21 + * Convert cputime to microseconds. 22 + */ 23 + static inline u64 cputime_to_usecs(const u64 cputime) 24 24 { 25 - return n / base; 25 + return cputime >> 12; 26 26 } 27 27 28 28 /* 29 - * Convert cputime to microseconds and back. 29 + * Convert cputime to nanoseconds. 30 30 */ 31 - static inline unsigned int cputime_to_usecs(const cputime_t cputime) 32 - { 33 - return (__force unsigned long long) cputime >> 12; 34 - } 35 - 31 + #define cputime_to_nsecs(cputime) tod_to_ns(cputime) 36 32 37 33 u64 arch_cpu_idle_time(int cpu); 38 34
+4 -8
arch/s390/include/asm/timex.h
··· 206 206 * ns = (todval * 125) >> 9; 207 207 * 208 208 * In order to avoid an overflow with the multiplication we can rewrite this. 209 - * With a split todval == 2^32 * th + tl (th upper 32 bits, tl lower 32 bits) 209 + * With a split todval == 2^9 * th + tl (th upper 55 bits, tl lower 9 bits) 210 210 * we end up with 211 211 * 212 - * ns = ((2^32 * th + tl) * 125 ) >> 9; 213 - * -> ns = (2^23 * th * 125) + ((tl * 125) >> 9); 212 + * ns = ((2^9 * th + tl) * 125 ) >> 9; 213 + * -> ns = (th * 125) + ((tl * 125) >> 9); 214 214 * 215 215 */ 216 216 static inline unsigned long long tod_to_ns(unsigned long long todval) 217 217 { 218 - unsigned long long ns; 219 - 220 - ns = ((todval >> 32) << 23) * 125; 221 - ns += ((todval & 0xffffffff) * 125) >> 9; 222 - return ns; 218 + return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9); 223 219 } 224 220 225 221 #endif
+3 -1
arch/s390/include/uapi/asm/unistd.h
··· 313 313 #define __NR_copy_file_range 375 314 314 #define __NR_preadv2 376 315 315 #define __NR_pwritev2 377 316 - #define NR_syscalls 378 316 + /* Number 378 is reserved for guarded storage */ 317 + #define __NR_statx 379 318 + #define NR_syscalls 380 317 319 318 320 /* 319 321 * There are some system calls that are not present on 64 bit, some
+1
arch/s390/kernel/compat_wrapper.c
··· 178 178 COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len); 179 179 COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags); 180 180 COMPAT_SYSCALL_WRAP6(copy_file_range, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags); 181 + COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer);
+5 -5
arch/s390/kernel/entry.S
··· 490 490 jnz .Lpgm_svcper # -> single stepped svc 491 491 1: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC 492 492 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 493 - j 3f 493 + j 4f 494 494 2: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER 495 495 lg %r15,__LC_KERNEL_STACK 496 496 lgr %r14,%r12 ··· 499 499 tm __LC_PGM_ILC+2,0x02 # check for transaction abort 500 500 jz 3f 501 501 mvc __THREAD_trap_tdb(256,%r14),0(%r13) 502 - 3: la %r11,STACK_FRAME_OVERHEAD(%r15) 503 - stg %r10,__THREAD_last_break(%r14) 502 + 3: stg %r10,__THREAD_last_break(%r14) 503 + 4: la %r11,STACK_FRAME_OVERHEAD(%r15) 504 504 stmg %r0,%r7,__PT_R0(%r11) 505 505 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC 506 506 stmg %r8,%r9,__PT_PSW(%r11) ··· 509 509 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) 510 510 stg %r10,__PT_ARGS(%r11) 511 511 tm __LC_PGM_ILC+3,0x80 # check for per exception 512 - jz 4f 512 + jz 5f 513 513 tmhh %r8,0x0001 # kernel per event ? 514 514 jz .Lpgm_kprobe 515 515 oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP 516 516 mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS 517 517 mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE 518 518 mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID 519 - 4: REENABLE_IRQS 519 + 5: REENABLE_IRQS 520 520 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 521 521 larl %r1,pgm_check_table 522 522 llgh %r10,__PT_INT_CODE+2(%r11)
+2
arch/s390/kernel/ipl.c
··· 564 564 565 565 static void __ipl_run(void *unused) 566 566 { 567 + if (MACHINE_IS_LPAR && ipl_info.type == IPL_TYPE_CCW) 568 + diag308(DIAG308_LOAD_NORMAL_DUMP, NULL); 567 569 diag308(DIAG308_LOAD_CLEAR, NULL); 568 570 if (MACHINE_IS_VM) 569 571 __cpcmd("IPL", NULL, 0, NULL);
+3
arch/s390/kernel/process.c
··· 124 124 clear_tsk_thread_flag(p, TIF_SINGLE_STEP); 125 125 /* Initialize per thread user and system timer values */ 126 126 p->thread.user_timer = 0; 127 + p->thread.guest_timer = 0; 127 128 p->thread.system_timer = 0; 129 + p->thread.hardirq_timer = 0; 130 + p->thread.softirq_timer = 0; 128 131 129 132 frame->sf.back_chain = 0; 130 133 /* new return point is ret_from_fork */
+2
arch/s390/kernel/syscalls.S
··· 386 386 SYSCALL(sys_copy_file_range,compat_sys_copy_file_range) /* 375 */ 387 387 SYSCALL(sys_preadv2,compat_sys_preadv2) 388 388 SYSCALL(sys_pwritev2,compat_sys_pwritev2) 389 + NI_SYSCALL 390 + SYSCALL(sys_statx,compat_sys_statx)
+1 -1
arch/s390/kernel/vtime.c
··· 111 111 } 112 112 113 113 static void account_system_index_scaled(struct task_struct *p, 114 - cputime_t cputime, cputime_t scaled, 114 + u64 cputime, u64 scaled, 115 115 enum cpu_usage_stat index) 116 116 { 117 117 p->stimescaled += cputime_to_nsecs(scaled);
+18 -1
arch/s390/mm/pgtable.c
··· 608 608 bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr) 609 609 { 610 610 spinlock_t *ptl; 611 + pgd_t *pgd; 612 + pud_t *pud; 613 + pmd_t *pmd; 611 614 pgste_t pgste; 612 615 pte_t *ptep; 613 616 pte_t pte; 614 617 bool dirty; 615 618 616 - ptep = get_locked_pte(mm, addr, &ptl); 619 + pgd = pgd_offset(mm, addr); 620 + pud = pud_alloc(mm, pgd, addr); 621 + if (!pud) 622 + return false; 623 + pmd = pmd_alloc(mm, pud, addr); 624 + if (!pmd) 625 + return false; 626 + /* We can't run guests backed by huge pages, but userspace can 627 + * still set them up and then try to migrate them without any 628 + * migration support. 629 + */ 630 + if (pmd_large(*pmd)) 631 + return true; 632 + 633 + ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl); 617 634 if (unlikely(!ptep)) 618 635 return false; 619 636