Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Martin Schwidefsky:
"One performance improvement and a few bug fixes. Two of the fixes
deal with the clock related problems we have seen on recent kernels"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
s390/mm: handle asce-type exceptions as normal page fault
s390,time: revert direct ktime path for s390 clockevent device
s390/time,vdso: convert to the new update_vsyscall interface
s390/uaccess: add missing page table walk range check
s390/mm: optimize copy_page
s390/dasd: validate request size before building CCW/TCW request
s390/signal: always restore saved runtime instrumentation psw bit

+87 -88
+1 -1
arch/s390/Kconfig
··· 101 101 select GENERIC_CPU_DEVICES if !SMP 102 102 select GENERIC_FIND_FIRST_BIT 103 103 select GENERIC_SMP_IDLE_THREAD 104 - select GENERIC_TIME_VSYSCALL_OLD 104 + select GENERIC_TIME_VSYSCALL 105 105 select HAVE_ALIGNED_STRUCT_PAGE if SLUB 106 106 select HAVE_ARCH_JUMP_LABEL if !MARCH_G5 107 107 select HAVE_ARCH_SECCOMP_FILTER
+13 -25
arch/s390/include/asm/page.h
··· 48 48 : "memory", "cc"); 49 49 } 50 50 51 + /* 52 + * copy_page uses the mvcl instruction with 0xb0 padding byte in order to 53 + * bypass caches when copying a page. Especially when copying huge pages 54 + * this keeps L1 and L2 data caches alive. 55 + */ 51 56 static inline void copy_page(void *to, void *from) 52 57 { 53 - if (MACHINE_HAS_MVPG) { 54 - register unsigned long reg0 asm ("0") = 0; 55 - asm volatile( 56 - " mvpg %0,%1" 57 - : : "a" (to), "a" (from), "d" (reg0) 58 - : "memory", "cc"); 59 - } else 60 - asm volatile( 61 - " mvc 0(256,%0),0(%1)\n" 62 - " mvc 256(256,%0),256(%1)\n" 63 - " mvc 512(256,%0),512(%1)\n" 64 - " mvc 768(256,%0),768(%1)\n" 65 - " mvc 1024(256,%0),1024(%1)\n" 66 - " mvc 1280(256,%0),1280(%1)\n" 67 - " mvc 1536(256,%0),1536(%1)\n" 68 - " mvc 1792(256,%0),1792(%1)\n" 69 - " mvc 2048(256,%0),2048(%1)\n" 70 - " mvc 2304(256,%0),2304(%1)\n" 71 - " mvc 2560(256,%0),2560(%1)\n" 72 - " mvc 2816(256,%0),2816(%1)\n" 73 - " mvc 3072(256,%0),3072(%1)\n" 74 - " mvc 3328(256,%0),3328(%1)\n" 75 - " mvc 3584(256,%0),3584(%1)\n" 76 - " mvc 3840(256,%0),3840(%1)\n" 77 - : : "a" (to), "a" (from) : "memory"); 58 + register void *reg2 asm ("2") = to; 59 + register unsigned long reg3 asm ("3") = 0x1000; 60 + register void *reg4 asm ("4") = from; 61 + register unsigned long reg5 asm ("5") = 0xb0001000; 62 + asm volatile( 63 + " mvcl 2,4" 64 + : "+d" (reg2), "+d" (reg3), "+d" (reg4), "+d" (reg5) 65 + : : "memory", "cc"); 78 66 } 79 67 80 68 #define clear_user_page(page, vaddr, pg) clear_page(page)
+3 -2
arch/s390/include/asm/vdso.h
··· 26 26 __u64 wtom_clock_nsec; /* 0x28 */ 27 27 __u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */ 28 28 __u32 tz_dsttime; /* Type of dst correction 0x34 */ 29 - __u32 ectg_available; 30 - __u32 ntp_mult; /* NTP adjusted multiplier 0x3C */ 29 + __u32 ectg_available; /* ECTG instruction present 0x38 */ 30 + __u32 tk_mult; /* Mult. used for xtime_nsec 0x3c */ 31 + __u32 tk_shift; /* Shift used for xtime_nsec 0x40 */ 31 32 }; 32 33 33 34 struct vdso_per_cpu_data {
+2 -1
arch/s390/kernel/asm-offsets.c
··· 65 65 DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec)); 66 66 DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest)); 67 67 DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available)); 68 - DEFINE(__VDSO_NTP_MULT, offsetof(struct vdso_data, ntp_mult)); 68 + DEFINE(__VDSO_TK_MULT, offsetof(struct vdso_data, tk_mult)); 69 + DEFINE(__VDSO_TK_SHIFT, offsetof(struct vdso_data, tk_shift)); 69 70 DEFINE(__VDSO_ECTG_BASE, offsetof(struct vdso_per_cpu_data, ectg_timer_base)); 70 71 DEFINE(__VDSO_ECTG_USER, offsetof(struct vdso_per_cpu_data, ectg_user_time)); 71 72 /* constants used by the vdso */
+1 -1
arch/s390/kernel/compat_signal.c
··· 194 194 return -EINVAL; 195 195 196 196 /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */ 197 - regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | 197 + regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) | 198 198 (__u64)(user_sregs.regs.psw.mask & PSW32_MASK_USER) << 32 | 199 199 (__u64)(user_sregs.regs.psw.mask & PSW32_MASK_RI) << 32 | 200 200 (__u64)(user_sregs.regs.psw.addr & PSW32_ADDR_AMODE);
+1 -1
arch/s390/kernel/pgm_check.S
··· 78 78 PGM_CHECK_DEFAULT /* 35 */ 79 79 PGM_CHECK_DEFAULT /* 36 */ 80 80 PGM_CHECK_DEFAULT /* 37 */ 81 - PGM_CHECK_DEFAULT /* 38 */ 81 + PGM_CHECK_64BIT(do_dat_exception) /* 38 */ 82 82 PGM_CHECK_64BIT(do_dat_exception) /* 39 */ 83 83 PGM_CHECK_64BIT(do_dat_exception) /* 3a */ 84 84 PGM_CHECK_64BIT(do_dat_exception) /* 3b */
+1 -1
arch/s390/kernel/signal.c
··· 94 94 return -EINVAL; 95 95 96 96 /* Use regs->psw.mask instead of PSW_USER_BITS to preserve PER bit. */ 97 - regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | 97 + regs->psw.mask = (regs->psw.mask & ~(PSW_MASK_USER | PSW_MASK_RI)) | 98 98 (user_sregs.regs.psw.mask & (PSW_MASK_USER | PSW_MASK_RI)); 99 99 /* Check for invalid user address space control. */ 100 100 if ((regs->psw.mask & PSW_MASK_ASC) == PSW_ASC_HOME)
+22 -24
arch/s390/kernel/time.c
··· 108 108 set_clock_comparator(S390_lowcore.clock_comparator); 109 109 } 110 110 111 - static int s390_next_ktime(ktime_t expires, 111 + static int s390_next_event(unsigned long delta, 112 112 struct clock_event_device *evt) 113 113 { 114 - struct timespec ts; 115 - u64 nsecs; 116 - 117 - ts.tv_sec = ts.tv_nsec = 0; 118 - monotonic_to_bootbased(&ts); 119 - nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires)); 120 - do_div(nsecs, 125); 121 - S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9); 122 - /* Program the maximum value if we have an overflow (== year 2042) */ 123 - if (unlikely(S390_lowcore.clock_comparator < sched_clock_base_cc)) 124 - S390_lowcore.clock_comparator = -1ULL; 114 + S390_lowcore.clock_comparator = get_tod_clock() + delta; 125 115 set_clock_comparator(S390_lowcore.clock_comparator); 126 116 return 0; 127 117 } ··· 136 146 cpu = smp_processor_id(); 137 147 cd = &per_cpu(comparators, cpu); 138 148 cd->name = "comparator"; 139 - cd->features = CLOCK_EVT_FEAT_ONESHOT | 140 - CLOCK_EVT_FEAT_KTIME; 149 + cd->features = CLOCK_EVT_FEAT_ONESHOT; 141 150 cd->mult = 16777; 142 151 cd->shift = 12; 143 152 cd->min_delta_ns = 1; 144 153 cd->max_delta_ns = LONG_MAX; 145 154 cd->rating = 400; 146 155 cd->cpumask = cpumask_of(cpu); 147 - cd->set_next_ktime = s390_next_ktime; 156 + cd->set_next_event = s390_next_event; 148 157 cd->set_mode = s390_set_mode; 149 158 150 159 clockevents_register_device(cd); ··· 210 221 return &clocksource_tod; 211 222 } 212 223 213 - void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm, 214 - struct clocksource *clock, u32 mult) 224 + void update_vsyscall(struct timekeeper *tk) 215 225 { 216 - if (clock != &clocksource_tod) 226 + u64 nsecps; 227 + 228 + if (tk->clock != &clocksource_tod) 217 229 return; 218 230 219 231 /* Make userspace gettimeofday spin until we're done. */ 220 232 ++vdso_data->tb_update_count; 221 233 smp_wmb(); 222 - vdso_data->xtime_tod_stamp = clock->cycle_last; 223 - vdso_data->xtime_clock_sec = wall_time->tv_sec; 224 - vdso_data->xtime_clock_nsec = wall_time->tv_nsec; 225 - vdso_data->wtom_clock_sec = wtm->tv_sec; 226 - vdso_data->wtom_clock_nsec = wtm->tv_nsec; 227 - vdso_data->ntp_mult = mult; 234 + vdso_data->xtime_tod_stamp = tk->clock->cycle_last; 235 + vdso_data->xtime_clock_sec = tk->xtime_sec; 236 + vdso_data->xtime_clock_nsec = tk->xtime_nsec; 237 + vdso_data->wtom_clock_sec = 238 + tk->xtime_sec + tk->wall_to_monotonic.tv_sec; 239 + vdso_data->wtom_clock_nsec = tk->xtime_nsec + 240 + + (tk->wall_to_monotonic.tv_nsec << tk->shift); 241 + nsecps = (u64) NSEC_PER_SEC << tk->shift; 242 + while (vdso_data->wtom_clock_nsec >= nsecps) { 243 + vdso_data->wtom_clock_nsec -= nsecps; 244 + vdso_data->wtom_clock_sec++; 245 + } 246 + vdso_data->tk_mult = tk->mult; 247 + vdso_data->tk_shift = tk->shift; 228 248 smp_wmb(); 229 249 ++vdso_data->tb_update_count; 230 250 }
+16 -14
arch/s390/kernel/vdso32/clock_gettime.S
··· 38 38 sl %r1,__VDSO_XTIME_STAMP+4(%r5) 39 39 brc 3,2f 40 40 ahi %r0,-1 41 - 2: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */ 41 + 2: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */ 42 42 lr %r2,%r0 43 - l %r0,__VDSO_NTP_MULT(%r5) 43 + l %r0,__VDSO_TK_MULT(%r5) 44 44 ltr %r1,%r1 45 45 mr %r0,%r0 46 46 jnm 3f 47 - a %r0,__VDSO_NTP_MULT(%r5) 47 + a %r0,__VDSO_TK_MULT(%r5) 48 48 3: alr %r0,%r2 49 - srdl %r0,12 50 - al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ 49 + al %r0,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ 51 50 al %r1,__VDSO_XTIME_NSEC+4(%r5) 52 51 brc 12,4f 53 52 ahi %r0,1 54 - 4: l %r2,__VDSO_XTIME_SEC+4(%r5) 55 - al %r0,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */ 53 + 4: al %r0,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic.nsec */ 56 54 al %r1,__VDSO_WTOM_NSEC+4(%r5) 57 55 brc 12,5f 58 56 ahi %r0,1 59 - 5: al %r2,__VDSO_WTOM_SEC+4(%r5) 57 + 5: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ 58 + srdl %r0,0(%r2) /* >> tk->shift */ 59 + l %r2,__VDSO_XTIME_SEC+4(%r5) 60 + al %r2,__VDSO_WTOM_SEC+4(%r5) 60 61 cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ 61 62 jne 1b 62 63 basr %r5,0 ··· 87 86 sl %r1,__VDSO_XTIME_STAMP+4(%r5) 88 87 brc 3,12f 89 88 ahi %r0,-1 90 - 12: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */ 89 + 12: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */ 91 90 lr %r2,%r0 92 - l %r0,__VDSO_NTP_MULT(%r5) 91 + l %r0,__VDSO_TK_MULT(%r5) 93 92 ltr %r1,%r1 94 93 mr %r0,%r0 95 94 jnm 13f 96 - a %r0,__VDSO_NTP_MULT(%r5) 95 + a %r0,__VDSO_TK_MULT(%r5) 97 96 13: alr %r0,%r2 98 - srdl %r0,12 99 - al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ 97 + al %r0,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ 100 98 al %r1,__VDSO_XTIME_NSEC+4(%r5) 101 99 brc 12,14f 102 100 ahi %r0,1 103 - 14: l %r2,__VDSO_XTIME_SEC+4(%r5) 101 + 14: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ 102 + srdl %r0,0(%r2) /* >> tk->shift */ 103 + l %r2,__VDSO_XTIME_SEC+4(%r5) 104 104 cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ 105 105 jne 11b 106 106 basr %r5,0
+5 -4
arch/s390/kernel/vdso32/gettimeofday.S
··· 35 35 sl %r1,__VDSO_XTIME_STAMP+4(%r5) 36 36 brc 3,3f 37 37 ahi %r0,-1 38 - 3: ms %r0,__VDSO_NTP_MULT(%r5) /* cyc2ns(clock,cycle_delta) */ 38 + 3: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */ 39 39 st %r0,24(%r15) 40 - l %r0,__VDSO_NTP_MULT(%r5) 40 + l %r0,__VDSO_TK_MULT(%r5) 41 41 ltr %r1,%r1 42 42 mr %r0,%r0 43 43 jnm 4f 44 - a %r0,__VDSO_NTP_MULT(%r5) 44 + a %r0,__VDSO_TK_MULT(%r5) 45 45 4: al %r0,24(%r15) 46 - srdl %r0,12 47 46 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ 48 47 al %r1,__VDSO_XTIME_NSEC+4(%r5) 49 48 brc 12,5f ··· 50 51 5: mvc 24(4,%r15),__VDSO_XTIME_SEC+4(%r5) 51 52 cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ 52 53 jne 1b 54 + l %r4,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ 55 + srdl %r0,0(%r4) /* >> tk->shift */ 53 56 l %r4,24(%r15) /* get tv_sec from stack */ 54 57 basr %r5,0 55 58 6: ltr %r0,%r0
+12 -10
arch/s390/kernel/vdso64/clock_gettime.S
··· 34 34 tmll %r4,0x0001 /* pending update ? loop */ 35 35 jnz 0b 36 36 stck 48(%r15) /* Store TOD clock */ 37 + lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ 38 + lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */ 39 + alg %r0,__VDSO_WTOM_SEC(%r5) /* + wall_to_monotonic.sec */ 37 40 lg %r1,48(%r15) 38 41 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 39 - msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */ 40 - srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ 41 - alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */ 42 - lg %r0,__VDSO_XTIME_SEC(%r5) 43 - alg %r1,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic */ 44 - alg %r0,__VDSO_WTOM_SEC(%r5) 42 + msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ 43 + alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ 44 + alg %r1,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic.nsec */ 45 + srlg %r1,%r1,0(%r2) /* >> tk->shift */ 45 46 clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ 46 47 jne 0b 47 48 larl %r5,13f ··· 63 62 tmll %r4,0x0001 /* pending update ? loop */ 64 63 jnz 5b 65 64 stck 48(%r15) /* Store TOD clock */ 65 + lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ 66 66 lg %r1,48(%r15) 67 67 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 68 - msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */ 69 - srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ 70 - alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime */ 71 - lg %r0,__VDSO_XTIME_SEC(%r5) 68 + msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ 69 + alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ 70 + srlg %r1,%r1,0(%r2) /* >> tk->shift */ 71 + lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */ 72 72 clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ 73 73 jne 5b 74 74 larl %r5,13f
+5 -4
arch/s390/kernel/vdso64/gettimeofday.S
··· 31 31 stck 48(%r15) /* Store TOD clock */ 32 32 lg %r1,48(%r15) 33 33 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 34 - msgf %r1,__VDSO_NTP_MULT(%r5) /* * NTP adjustment */ 35 - srlg %r1,%r1,12 /* cyc2ns(clock,cycle_delta) */ 36 - alg %r1,__VDSO_XTIME_NSEC(%r5) /* + xtime.tv_nsec */ 37 - lg %r0,__VDSO_XTIME_SEC(%r5) /* xtime.tv_sec */ 34 + msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ 35 + alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ 36 + lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */ 38 37 clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */ 39 38 jne 0b 39 + lgf %r5,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ 40 + srlg %r1,%r1,0(%r5) /* >> tk->shift */ 40 41 larl %r5,5f 41 42 2: clg %r1,0(%r5) 42 43 jl 3f
+3
arch/s390/lib/uaccess_pt.c
··· 78 78 * contains the (negative) exception code. 79 79 */ 80 80 #ifdef CONFIG_64BIT 81 + 81 82 static unsigned long follow_table(struct mm_struct *mm, 82 83 unsigned long address, int write) 83 84 { 84 85 unsigned long *table = (unsigned long *)__pa(mm->pgd); 85 86 87 + if (unlikely(address > mm->context.asce_limit - 1)) 88 + return -0x38UL; 86 89 switch (mm->context.asce_bits & _ASCE_TYPE_MASK) { 87 90 case _ASCE_TYPE_REGION1: 88 91 table = table + ((address >> 53) & 0x7ff);
+2
drivers/s390/block/dasd_eckd.c
··· 3224 3224 3225 3225 fcx_multitrack = private->features.feature[40] & 0x20; 3226 3226 data_size = blk_rq_bytes(req); 3227 + if (data_size % blksize) 3228 + return ERR_PTR(-EINVAL); 3227 3229 /* tpm write request add CBC data on each track boundary */ 3228 3230 if (rq_data_dir(req) == WRITE) 3229 3231 data_size += (last_trk - first_trk) * 4;