Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6

* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6:
[S390] cio: Call cancel_halt_clear even when actl == 0.
[S390] cio: Use path verification to check for path state.
[S390] cio: Fix locking when calling notify function.
[S390] Fixed handling of access register mode faults.
[S390] dasd: Use default recovery for SNSS requests
[S390] check_bugs() should be inline.
[S390] tape: Compression overwrites crypto setting
[S390] nss: disable kexec.
[S390] reipl: move dump_prefix_page out of text section.
[S390] smp: disable preemption in smp_call_function/smp_call_function_on
[S390] kprobes breaks BUG_ON

+166 -162
+2
arch/s390/Kconfig
··· 376 376 Select this option, if you want to share the text segment of the 377 377 Linux kernel between different VM guests. This reduces memory 378 378 usage with lots of guests but greatly increases kernel size. 379 + Also if a kernel was IPL'ed from a shared segment the kexec system 380 + call will not work. 379 381 You should only select this option if you know what you are 380 382 doing and want to exploit this feature. 381 383
+8 -3
arch/s390/kernel/head31.S
··· 121 121 .long .Lduct # cr2: dispatchable unit control table 122 122 .long 0 # cr3: instruction authorization 123 123 .long 0 # cr4: instruction authorization 124 - .long 0xffffffff # cr5: primary-aste origin 124 + .long .Lduct # cr5: primary-aste origin 125 125 .long 0 # cr6: I/O interrupts 126 126 .long 0 # cr7: secondary space segment table 127 127 .long 0 # cr8: access registers translation ··· 132 132 .long 0 # cr13: home space segment table 133 133 .long 0xc0000000 # cr14: machine check handling off 134 134 .long 0 # cr15: linkage stack operations 135 - .Lduct: .long 0,0,0,0,0,0,0,0 136 - .long 0,0,0,0,0,0,0,0 137 135 .Lpcfpu:.long 0x00080000,0x80000000 + .Lchkfpu 138 136 .Lpccsp:.long 0x00080000,0x80000000 + .Lchkcsp 139 137 .Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg ··· 145 147 .Linittu: .long init_thread_union 146 148 .Lstartup_init: 147 149 .long startup_init 150 + .align 64 151 + .Lduct: .long 0,0,0,0,.Lduald,0,0,0 152 + .long 0,0,0,0,0,0,0,0 153 + .align 128 154 + .Lduald:.rept 8 155 + .long 0x80000000,0,0,0 # invalid access-list entries 156 + .endr 148 157 149 158 .org 0x12000 150 159 .globl _ehead
+8 -3
arch/s390/kernel/head64.S
··· 134 134 .quad .Lduct # cr2: dispatchable unit control table 135 135 .quad 0 # cr3: instruction authorization 136 136 .quad 0 # cr4: instruction authorization 137 - .quad 0xffffffffffffffff # cr5: primary-aste origin 137 + .quad .Lduct # cr5: primary-aste origin 138 138 .quad 0 # cr6: I/O interrupts 139 139 .quad 0 # cr7: secondary space segment table 140 140 .quad 0 # cr8: access registers translation ··· 145 145 .quad 0 # cr13: home space segment table 146 146 .quad 0xc0000000 # cr14: machine check handling off 147 147 .quad 0 # cr15: linkage stack operations 148 - .Lduct: .long 0,0,0,0,0,0,0,0 149 - .long 0,0,0,0,0,0,0,0 150 148 .Lpcmsk:.quad 0x0000000180000000 151 149 .L4malign:.quad 0xffffffffffc00000 152 150 .Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 153 151 .Lnop: .long 0x07000700 154 152 .Lparmaddr: 155 153 .quad PARMAREA 154 + .align 64 155 + .Lduct: .long 0,0,0,0,.Lduald,0,0,0 156 + .long 0,0,0,0,0,0,0,0 157 + .align 128 158 + .Lduald:.rept 8 159 + .long 0x80000000,0,0,0 # invalid access-list entries 160 + .endr 156 161 157 162 .org 0x12000 158 163 .globl _ehead
+2 -2
arch/s390/kernel/ipl.c
··· 1066 1066 reset->fn(); 1067 1067 } 1068 1068 1069 - extern __u32 dump_prefix_page; 1069 + u32 dump_prefix_page; 1070 1070 1071 1071 void s390_reset_system(void) 1072 1072 { ··· 1078 1078 lc->panic_stack = S390_lowcore.panic_stack; 1079 1079 1080 1080 /* Save prefix page address for dump case */ 1081 - dump_prefix_page = (unsigned long) lc; 1081 + dump_prefix_page = (u32)(unsigned long) lc; 1082 1082 1083 1083 /* Disable prefixing */ 1084 1084 set_prefix(0);
+7 -14
arch/s390/kernel/kprobes.c
··· 337 337 } 338 338 339 339 p = get_kprobe(addr); 340 - if (!p) { 341 - if (*addr != BREAKPOINT_INSTRUCTION) { 342 - /* 343 - * The breakpoint instruction was removed right 344 - * after we hit it. Another cpu has removed 345 - * either a probepoint or a debugger breakpoint 346 - * at this address. In either case, no further 347 - * handling of this interrupt is appropriate. 348 - * 349 - */ 350 - ret = 1; 351 - } 352 - /* Not one of ours: let kernel handle it */ 340 + if (!p) 341 + /* 342 + * No kprobe at this address. The fault has not been 343 + * caused by a kprobe breakpoint. The race of breakpoint 344 + * vs. kprobe remove does not exist because on s390 we 345 + * use stop_machine_run to arm/disarm the breakpoints. 346 + */ 353 347 goto no_kprobe; 354 - } 355 348 356 349 kcb->kprobe_status = KPROBE_HIT_ACTIVE; 357 350 set_current_kprobe(p, regs, kcb);
+5
arch/s390/kernel/machine_kexec.c
··· 19 19 #include <asm/system.h> 20 20 #include <asm/smp.h> 21 21 #include <asm/reset.h> 22 + #include <asm/ipl.h> 22 23 23 24 typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long); 24 25 ··· 29 28 int machine_kexec_prepare(struct kimage *image) 30 29 { 31 30 void *reboot_code_buffer; 31 + 32 + /* Can't replace kernel image since it is read-only. */ 33 + if (ipl_flags & IPL_NSS_VALID) 34 + return -ENOSYS; 32 35 33 36 /* We don't support anything but the default image type for now. */ 34 37 if (image->type != KEXEC_TYPE_DEFAULT)
+7 -6
arch/s390/kernel/reipl.S
··· 8 8 9 9 #include <asm/lowcore.h> 10 10 11 + # 12 + # do_reipl_asm 13 + # Parameter: r2 = schid of reipl device 14 + # 11 15 .globl do_reipl_asm 12 16 do_reipl_asm: basr %r13,0 13 17 .Lpg0: lpsw .Lnewpsw-.Lpg0(%r13) ··· 20 16 stm %r0,%r15,__LC_GPREGS_SAVE_AREA 21 17 stctl %c0,%c15,__LC_CREGS_SAVE_AREA 22 18 stam %a0,%a15,__LC_AREGS_SAVE_AREA 23 - mvc __LC_PREFIX_SAVE_AREA(4),dump_prefix_page-.Lpg0(%r13) 19 + l %r10,.Ldump_pfx-.Lpg0(%r13) 20 + mvc __LC_PREFIX_SAVE_AREA(4),0(%r10) 24 21 stckc .Lclkcmp-.Lpg0(%r13) 25 22 mvc __LC_CLOCK_COMP_SAVE_AREA(8),.Lclkcmp-.Lpg0(%r13) 26 23 stpt __LC_CPU_TIMER_SAVE_AREA 27 24 st %r13, __LC_PSW_SAVE_AREA+4 28 - 29 25 lctl %c6,%c6,.Lall-.Lpg0(%r13) 30 26 lr %r1,%r2 31 27 mvc __LC_PGM_NEW_PSW(8),.Lpcnew-.Lpg0(%r13) ··· 59 55 .align 8 60 56 .Lclkcmp: .quad 0x0000000000000000 61 57 .Lall: .long 0xff000000 58 + .Ldump_pfx: .long dump_prefix_page 62 59 .align 8 63 60 .Lnewpsw: .long 0x00080000,0x80000000+.Lpg1 64 61 .Lpcnew: .long 0x00080000,0x80000000+.Lecs ··· 84 79 .long 0x00000000,0x00000000 85 80 .long 0x00000000,0x00000000 86 81 .long 0x00000000,0x00000000 87 - .globl dump_prefix_page 88 - dump_prefix_page: 89 - .long 0x00000000 90 -
+9 -4
arch/s390/kernel/reipl64.S
··· 8 8 */ 9 9 10 10 #include <asm/lowcore.h> 11 + 12 + # 13 + # do_reipl_asm 14 + # Parameter: r2 = schid of reipl device 15 + # 16 + 11 17 .globl do_reipl_asm 12 18 do_reipl_asm: basr %r13,0 13 19 .Lpg0: lpswe .Lnewpsw-.Lpg0(%r13) ··· 26 20 stg %r0,__LC_GPREGS_SAVE_AREA-0x1000+8(%r1) 27 21 stctg %c0,%c15,__LC_CREGS_SAVE_AREA-0x1000(%r1) 28 22 stam %a0,%a15,__LC_AREGS_SAVE_AREA-0x1000(%r1) 29 - mvc __LC_PREFIX_SAVE_AREA-0x1000(4,%r1),dump_prefix_page-.Lpg0(%r13) 23 + lg %r10,.Ldump_pfx-.Lpg0(%r13) 24 + mvc __LC_PREFIX_SAVE_AREA-0x1000(4,%r1),0(%r10) 30 25 stfpc __LC_FP_CREG_SAVE_AREA-0x1000(%r1) 31 26 stckc .Lclkcmp-.Lpg0(%r13) 32 27 mvc __LC_CLOCK_COMP_SAVE_AREA-0x1000(8,%r1),.Lclkcmp-.Lpg0(%r13) ··· 71 64 .align 8 72 65 .Lclkcmp: .quad 0x0000000000000000 73 66 .Lall: .quad 0x00000000ff000000 67 + .Ldump_pfx: .quad dump_prefix_page 74 68 .Lregsave: .quad 0x0000000000000000 75 69 .align 16 76 70 /* ··· 111 103 .long 0x00000000,0x00000000 112 104 .long 0x00000000,0x00000000 113 105 .long 0x00000000,0x00000000 114 - .globl dump_prefix_page 115 - dump_prefix_page: 116 - .long 0x00000000
+8 -7
arch/s390/kernel/smp.c
··· 94 94 int cpu, local = 0; 95 95 96 96 /* 97 - * Can deadlock when interrupts are disabled or if in wrong context, 98 - * caller must disable preemption 97 + * Can deadlock when interrupts are disabled or if in wrong context. 99 98 */ 100 - WARN_ON(irqs_disabled() || in_irq() || preemptible()); 99 + WARN_ON(irqs_disabled() || in_irq()); 101 100 102 101 /* 103 102 * Check for local function call. We have to have the same call order ··· 151 152 * Run a function on all other CPUs. 152 153 * 153 154 * You must not call this function with disabled interrupts or from a 154 - * hardware interrupt handler. Must be called with preemption disabled. 155 - * You may call it from a bottom half. 155 + * hardware interrupt handler. You may call it from a bottom half. 156 156 */ 157 157 int smp_call_function(void (*func) (void *info), void *info, int nonatomic, 158 158 int wait) 159 159 { 160 160 cpumask_t map; 161 161 162 + preempt_disable(); 162 163 map = cpu_online_map; 163 164 cpu_clear(smp_processor_id(), map); 164 165 __smp_call_function_map(func, info, nonatomic, wait, map); 166 + preempt_enable(); 165 167 return 0; 166 168 } 167 169 EXPORT_SYMBOL(smp_call_function); ··· 178 178 * Run a function on one processor. 179 179 * 180 180 * You must not call this function with disabled interrupts or from a 181 - * hardware interrupt handler. Must be called with preemption disabled. 182 - * You may call it from a bottom half. 181 + * hardware interrupt handler. You may call it from a bottom half. 183 182 */ 184 183 int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic, 185 184 int wait, int cpu) 186 185 { 187 186 cpumask_t map = CPU_MASK_NONE; 188 187 188 + preempt_disable(); 189 189 cpu_set(cpu, map); 190 190 __smp_call_function_map(func, info, nonatomic, wait, map); 191 + preempt_enable(); 191 192 return 0; 192 193 } 193 194 EXPORT_SYMBOL(smp_call_function_on);
+47 -58
arch/s390/mm/fault.c
··· 108 108 } 109 109 110 110 /* 111 - * Check which address space is addressed by the access 112 - * register in S390_lowcore.exc_access_id. 113 - * Returns 1 for user space and 0 for kernel space. 111 + * Returns the address space associated with the fault. 112 + * Returns 0 for kernel space, 1 for user space and 113 + * 2 for code execution in user space with noexec=on. 114 114 */ 115 - static int __check_access_register(struct pt_regs *regs, int error_code) 116 - { 117 - int areg = S390_lowcore.exc_access_id; 118 - 119 - if (areg == 0) 120 - /* Access via access register 0 -> kernel address */ 121 - return 0; 122 - save_access_regs(current->thread.acrs); 123 - if (regs && areg < NUM_ACRS && current->thread.acrs[areg] <= 1) 124 - /* 125 - * access register contains 0 -> kernel address, 126 - * access register contains 1 -> user space address 127 - */ 128 - return current->thread.acrs[areg]; 129 - 130 - /* Something unhealthy was done with the access registers... */ 131 - die("page fault via unknown access register", regs, error_code); 132 - do_exit(SIGKILL); 133 - return 0; 134 - } 135 - 136 - /* 137 - * Check which address space the address belongs to. 138 - * May return 1 or 2 for user space and 0 for kernel space. 139 - * Returns 2 for user space in primary addressing mode with 140 - * CONFIG_S390_EXEC_PROTECT on and kernel parameter noexec=on. 141 - */ 142 - static inline int check_user_space(struct pt_regs *regs, int error_code) 115 + static inline int check_space(struct task_struct *tsk) 143 116 { 144 117 /* 145 - * The lowest two bits of S390_lowcore.trans_exc_code indicate 146 - * which paging table was used: 147 - * 0: Primary Segment Table Descriptor 148 - * 1: STD determined via access register 149 - * 2: Secondary Segment Table Descriptor 150 - * 3: Home Segment Table Descriptor 118 + * The lowest two bits of S390_lowcore.trans_exc_code 119 + * indicate which paging table was used. 151 120 */ 152 - int descriptor = S390_lowcore.trans_exc_code & 3; 153 - if (unlikely(descriptor == 1)) 154 - return __check_access_register(regs, error_code); 155 - if (descriptor == 2) 156 - return current->thread.mm_segment.ar4; 157 - return ((descriptor != 0) ^ (switch_amode)) << s390_noexec; 121 + int desc = S390_lowcore.trans_exc_code & 3; 122 + 123 + if (desc == 3) /* Home Segment Table Descriptor */ 124 + return switch_amode == 0; 125 + if (desc == 2) /* Secondary Segment Table Descriptor */ 126 + return tsk->thread.mm_segment.ar4; 127 + #ifdef CONFIG_S390_SWITCH_AMODE 128 + if (unlikely(desc == 1)) { /* STD determined via access register */ 129 + /* %a0 always indicates primary space. */ 130 + if (S390_lowcore.exc_access_id != 0) { 131 + save_access_regs(tsk->thread.acrs); 132 + /* 133 + * An alet of 0 indicates primary space. 134 + * An alet of 1 indicates secondary space. 135 + * Any other alet values generate an 136 + * alen-translation exception. 137 + */ 138 + if (tsk->thread.acrs[S390_lowcore.exc_access_id]) 139 + return tsk->thread.mm_segment.ar4; 140 + } 141 + } 142 + #endif 143 + /* Primary Segment Table Descriptor */ 144 + return switch_amode << s390_noexec; 158 145 } 159 146 160 147 /* ··· 252 265 * 11 Page translation -> Not present (nullification) 253 266 * 3b Region third trans. -> Not present (nullification) 254 267 */ 255 - static inline void __kprobes 268 + static inline void 256 269 do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection) 257 270 { 258 271 struct task_struct *tsk; 259 272 struct mm_struct *mm; 260 273 struct vm_area_struct * vma; 261 274 unsigned long address; 262 - int user_address; 263 275 const struct exception_table_entry *fixup; 264 - int si_code = SEGV_MAPERR; 276 + int si_code; 277 + int space; 265 278 266 279 tsk = current; 267 280 mm = tsk->mm; ··· 281 294 NULL pointer write access in kernel mode. */ 282 295 if (!(regs->psw.mask & PSW_MASK_PSTATE)) { 283 296 address = 0; 284 - user_address = 0; 297 + space = 0; 285 298 goto no_context; 286 299 } 287 300 ··· 296 309 * the address 297 310 */ 298 311 address = S390_lowcore.trans_exc_code & __FAIL_ADDR_MASK; 299 - user_address = check_user_space(regs, error_code); 312 + space = check_space(tsk); 300 313 301 314 /* 302 315 * Verify that the fault happened in user space, that 303 316 * we are not in an interrupt and that there is a 304 317 * user context. 305 318 */ 306 - if (user_address == 0 || in_atomic() || !mm) 307 - goto no_context; 319 + if (unlikely(space == 0 || in_atomic() || !mm)) 320 + goto no_context; 308 321 309 322 /* 310 323 * When we get here, the fault happened in the current ··· 315 328 316 329 down_read(&mm->mmap_sem); 317 330 318 - vma = find_vma(mm, address); 319 - if (!vma) 320 - goto bad_area; 331 + si_code = SEGV_MAPERR; 332 + vma = find_vma(mm, address); 333 + if (!vma) 334 + goto bad_area; 321 335 322 336 #ifdef CONFIG_S390_EXEC_PROTECT 323 - if (unlikely((user_address == 2) && !(vma->vm_flags & VM_EXEC))) 337 + if (unlikely((space == 2) && !(vma->vm_flags & VM_EXEC))) 324 338 if (!signal_return(mm, regs, address, error_code)) 325 339 /* 326 340 * signal_return() has done an up_read(&mm->mmap_sem) ··· 377 389 * The instruction that caused the program check will 378 390 * be repeated. Don't signal single step via SIGTRAP. 379 391 */ 380 - clear_tsk_thread_flag(current, TIF_SINGLE_STEP); 392 + clear_tsk_thread_flag(tsk, TIF_SINGLE_STEP); 381 393 return; 382 394 383 395 /* ··· 407 419 * Oops. The kernel tried to access some bad page. We'll have to 408 420 * terminate things with extreme prejudice. 409 421 */ 410 - if (user_address == 0) 422 + if (space == 0) 411 423 printk(KERN_ALERT "Unable to handle kernel pointer dereference" 412 424 " at virtual kernel address %p\n", (void *)address); 413 425 else ··· 450 462 goto no_context; 451 463 } 452 464 453 - void do_protection_exception(struct pt_regs *regs, unsigned long error_code) 465 + void __kprobes do_protection_exception(struct pt_regs *regs, 466 + unsigned long error_code) 454 467 { 455 468 regs->psw.addr -= (error_code >> 16); 456 469 do_exception(regs, 4, 1); 457 470 } 458 471 459 - void do_dat_exception(struct pt_regs *regs, unsigned long error_code) 472 + void __kprobes do_dat_exception(struct pt_regs *regs, unsigned long error_code) 460 473 { 461 474 do_exception(regs, error_code & 0xff, 0); 462 475 }
+1
drivers/s390/block/dasd_eer.c
··· 461 461 cqr->device = device; 462 462 cqr->retries = 255; 463 463 cqr->expires = 10 * HZ; 464 + clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 464 465 465 466 cqr->cpaddr->cmd_code = DASD_ECKD_CCW_SNSS; 466 467 cqr->cpaddr->count = SNSS_DATA_SIZE;
+4 -1
drivers/s390/char/tape_std.c
··· 647 647 return PTR_ERR(request); 648 648 request->op = TO_NOP; 649 649 /* setup ccws */ 650 - *device->modeset_byte = (mt_count == 0) ? 0x00 : 0x08; 650 + if (mt_count == 0) 651 + *device->modeset_byte &= ~0x08; 652 + else 653 + *device->modeset_byte |= 0x08; 651 654 tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte); 652 655 tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL); 653 656 /* execute it */
+56 -63
drivers/s390/cio/device_fsm.c
··· 144 144 ret = stsch(sch->schid, &sch->schib); 145 145 if (ret || !sch->schib.pmcw.dnv) 146 146 return -ENODEV; 147 - if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0) 148 - /* Not operational or no activity -> done. */ 147 + if (!sch->schib.pmcw.ena) 148 + /* Not operational -> done. */ 149 149 return 0; 150 150 /* Stage 1: cancel io. */ 151 151 if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) && ··· 334 334 struct ccw_device *cdev; 335 335 struct subchannel *sch; 336 336 int ret; 337 + unsigned long flags; 337 338 338 339 priv = container_of(work, struct ccw_device_private, kick_work); 339 340 cdev = priv->cdev; 341 + spin_lock_irqsave(cdev->ccwlock, flags); 340 342 sch = to_subchannel(cdev->dev.parent); 341 - ret = (sch->driver && sch->driver->notify) ? 342 - sch->driver->notify(&sch->dev, CIO_OPER) : 0; 343 + if (sch->driver && sch->driver->notify) { 344 + spin_unlock_irqrestore(cdev->ccwlock, flags); 345 + ret = sch->driver->notify(&sch->dev, CIO_OPER); 346 + spin_lock_irqsave(cdev->ccwlock, flags); 347 + } else 348 + ret = 0; 349 + if (ret) { 350 + /* Reenable channel measurements, if needed. */ 351 + spin_unlock_irqrestore(cdev->ccwlock, flags); 352 + cmf_reenable(cdev); 353 + spin_lock_irqsave(cdev->ccwlock, flags); 354 + wake_up(&cdev->private->wait_q); 355 + } 356 + spin_unlock_irqrestore(cdev->ccwlock, flags); 343 357 if (!ret) 344 358 /* Driver doesn't want device back. */ 345 359 ccw_device_do_unreg_rereg(work); 346 - else { 347 - /* Reenable channel measurements, if needed. */ 348 - cmf_reenable(cdev); 349 - wake_up(&cdev->private->wait_q); 350 - } 351 360 } 352 361 353 362 /* ··· 543 534 struct ccw_device *cdev; 544 535 struct subchannel *sch; 545 536 int ret; 537 + unsigned long flags; 546 538 547 539 priv = container_of(work, struct ccw_device_private, kick_work); 548 540 cdev = priv->cdev; 541 + spin_lock_irqsave(cdev->ccwlock, flags); 549 542 sch = to_subchannel(cdev->dev.parent); 550 543 /* Extra sanity. */ 551 544 if (sch->lpm) 552 - return; 553 - ret = (sch->driver && sch->driver->notify) ? 554 - sch->driver->notify(&sch->dev, CIO_NO_PATH) : 0; 545 + goto out_unlock; 546 + if (sch->driver && sch->driver->notify) { 547 + spin_unlock_irqrestore(cdev->ccwlock, flags); 548 + ret = sch->driver->notify(&sch->dev, CIO_NO_PATH); 549 + spin_lock_irqsave(cdev->ccwlock, flags); 550 + } else 551 + ret = 0; 555 552 if (!ret) { 556 553 if (get_device(&sch->dev)) { 557 554 /* Driver doesn't want to keep device. */ ··· 577 562 cdev->private->state = DEV_STATE_DISCONNECTED; 578 563 wake_up(&cdev->private->wait_q); 579 564 } 565 + out_unlock: 566 + spin_unlock_irqrestore(cdev->ccwlock, flags); 580 567 } 581 568 582 569 void ··· 624 607 default: 625 608 /* Reset oper notify indication after verify error. */ 626 609 cdev->private->flags.donotify = 0; 627 - PREPARE_WORK(&cdev->private->kick_work, 628 - ccw_device_nopath_notify); 629 - queue_work(ccw_device_notify_work, &cdev->private->kick_work); 630 - ccw_device_done(cdev, DEV_STATE_NOT_OPER); 610 + if (cdev->online) { 611 + PREPARE_WORK(&cdev->private->kick_work, 612 + ccw_device_nopath_notify); 613 + queue_work(ccw_device_notify_work, 614 + &cdev->private->kick_work); 615 + } else 616 + ccw_device_done(cdev, DEV_STATE_NOT_OPER); 631 617 break; 632 618 } 633 619 } ··· 776 756 ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event) 777 757 { 778 758 struct subchannel *sch; 759 + int ret; 779 760 780 761 sch = to_subchannel(cdev->dev.parent); 781 - if (sch->driver->notify && 782 - sch->driver->notify(&sch->dev, sch->lpm ? CIO_GONE : CIO_NO_PATH)) { 783 - ccw_device_set_timeout(cdev, 0); 784 - cdev->private->flags.fake_irb = 0; 785 - cdev->private->state = DEV_STATE_DISCONNECTED; 786 - wake_up(&cdev->private->wait_q); 787 - return; 762 + if (sch->driver->notify) { 763 + spin_unlock_irq(cdev->ccwlock); 764 + ret = sch->driver->notify(&sch->dev, 765 + sch->lpm ? CIO_GONE : CIO_NO_PATH); 766 + spin_lock_irq(cdev->ccwlock); 767 + } else 768 + ret = 0; 769 + if (ret) { 770 + ccw_device_set_timeout(cdev, 0); 771 + cdev->private->flags.fake_irb = 0; 772 + cdev->private->state = DEV_STATE_DISCONNECTED; 773 + wake_up(&cdev->private->wait_q); 774 + return; 788 775 } 789 776 cdev->private->state = DEV_STATE_NOT_OPER; 790 777 cio_disable_subchannel(sch); ··· 996 969 997 970 sch = to_subchannel(cdev->dev.parent); 998 971 ccw_device_set_timeout(cdev, 0); 972 + /* Start delayed path verification. */ 973 + ccw_device_online_verify(cdev, 0); 999 974 /* OK, i/o is dead now. Call interrupt handler. */ 1000 - cdev->private->state = DEV_STATE_ONLINE; 1001 975 if (cdev->handler) 1002 976 cdev->handler(cdev, cdev->private->intparm, 1003 977 ERR_PTR(-EIO)); 1004 - if (!sch->lpm) { 1005 - PREPARE_WORK(&cdev->private->kick_work, 1006 - ccw_device_nopath_notify); 1007 - queue_work(ccw_device_notify_work, &cdev->private->kick_work); 1008 - } else if (cdev->private->flags.doverify) 1009 - /* Start delayed path verification. */ 1010 - ccw_device_online_verify(cdev, 0); 1011 978 } 1012 979 1013 980 static void ··· 1014 993 ccw_device_set_timeout(cdev, 3*HZ); 1015 994 return; 1016 995 } 1017 - if (ret == -ENODEV) { 1018 - struct subchannel *sch; 1019 - 1020 - sch = to_subchannel(cdev->dev.parent); 1021 - if (!sch->lpm) { 1022 - PREPARE_WORK(&cdev->private->kick_work, 1023 - ccw_device_nopath_notify); 1024 - queue_work(ccw_device_notify_work, 1025 - &cdev->private->kick_work); 1026 - } else 1027 - dev_fsm_event(cdev, DEV_EVENT_NOTOPER); 1028 - return; 1029 - } 1030 - //FIXME: Can we get here? 1031 - cdev->private->state = DEV_STATE_ONLINE; 996 + /* Start delayed path verification. */ 997 + ccw_device_online_verify(cdev, 0); 1032 998 if (cdev->handler) 1033 999 cdev->handler(cdev, cdev->private->intparm, 1034 1000 ERR_PTR(-EIO)); ··· 1033 1025 cdev->private->state = DEV_STATE_TIMEOUT_KILL; 1034 1026 return; 1035 1027 } 1036 - if (ret == -ENODEV) { 1037 - if (!sch->lpm) { 1038 - PREPARE_WORK(&cdev->private->kick_work, 1039 - ccw_device_nopath_notify); 1040 - queue_work(ccw_device_notify_work, 1041 - &cdev->private->kick_work); 1042 - } else 1043 - dev_fsm_event(cdev, DEV_EVENT_NOTOPER); 1044 - return; 1045 - } 1028 + /* Start delayed path verification. */ 1029 + ccw_device_online_verify(cdev, 0); 1046 1030 if (cdev->handler) 1047 1031 cdev->handler(cdev, cdev->private->intparm, 1048 1032 ERR_PTR(-EIO)); 1049 - if (!sch->lpm) { 1050 - PREPARE_WORK(&cdev->private->kick_work, 1051 - ccw_device_nopath_notify); 1052 - queue_work(ccw_device_notify_work, &cdev->private->kick_work); 1053 - } else 1054 - /* Start delayed path verification. */ 1055 - ccw_device_online_verify(cdev, 0); 1056 1033 } 1057 1034 1058 1035 static void
+1 -1
include/asm-s390/bugs.h
··· 16 16 * void check_bugs(void); 17 17 */ 18 18 19 - static void __init check_bugs(void) 19 + static inline void check_bugs(void) 20 20 { 21 21 /* s390 has no bugs ... */ 22 22 }
+1
include/asm-s390/ipl.h
··· 74 74 extern u32 ipl_flags; 75 75 extern u16 ipl_devno; 76 76 77 + extern u32 dump_prefix_page; 77 78 extern void do_reipl(void); 78 79 extern void ipl_save_parameters(void); 79 80