Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc

Pull sparc fixes from David Miller:
"Several build/bug fixes for sparc, including:

1) Configuring a mix of static vs. modular sparc64 crypto modules
didn't work, remove an ill-conceived attempt to only have to build
the device match table for these drivers once to fix the problem.

Reported by Meelis Roos.

2) Make the Montgomery multiply/square and mpmul instructions actually
usable in 32-bit tasks. Essentially this involves providing 32-bit
userspace with a way to use a 64-bit stack when it needs to.

3) Our sparc64 atomic backoffs don't yield cpu strands properly on
Niagara chips. Use pause instruction when available to achieve
this, otherwise use a benign instruction we know blocks the strand
for some time.

4) Wire up kcmp

5) Fix the build of various drivers by removing the unnecessary
blocking of OF_GPIO on SPARC.

6) Fix unintended regression wherein of_address_to_resource stopped
being provided. Fix from Andreas Larsson.

7) Fix NULL dereference in leon_handle_ext_irq(), also from Andreas
Larsson."

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
sparc64: Fix build with mix of modular vs. non-modular crypto drivers.
sparc: Support atomic64_dec_if_positive properly.
of/address: sparc: Declare of_address_to_resource() as an extern function for sparc again
sparc32, leon: Check for existent irq_map entry in leon_handle_ext_irq
sparc: Add sparc support for platform_get_irq()
sparc: Allow OF_GPIO on sparc.
qlogicpti: Fix build warning.
sparc: Wire up sys_kcmp.
sparc64: Improve documentation and readability of atomic backoff code.
sparc64: Use pause instruction when available.
sparc64: Fix cpu strand yielding.
sparc64: Make montmul/montsqr/mpmul usable in 32-bit threads.

+291 -97
+1
arch/sparc/Kconfig
··· 20 20 select HAVE_ARCH_TRACEHOOK 21 21 select SYSCTL_EXCEPTION_TRACE 22 22 select ARCH_WANT_OPTIONAL_GPIOLIB 23 + select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE 23 24 select RTC_CLASS 24 25 select RTC_DRV_M48T59 25 26 select HAVE_IRQ_WORK
+8 -8
arch/sparc/crypto/Makefile
··· 13 13 14 14 obj-$(CONFIG_CRYPTO_CRC32C_SPARC64) += crc32c-sparc64.o 15 15 16 - sha1-sparc64-y := sha1_asm.o sha1_glue.o crop_devid.o 17 - sha256-sparc64-y := sha256_asm.o sha256_glue.o crop_devid.o 18 - sha512-sparc64-y := sha512_asm.o sha512_glue.o crop_devid.o 19 - md5-sparc64-y := md5_asm.o md5_glue.o crop_devid.o 16 + sha1-sparc64-y := sha1_asm.o sha1_glue.o 17 + sha256-sparc64-y := sha256_asm.o sha256_glue.o 18 + sha512-sparc64-y := sha512_asm.o sha512_glue.o 19 + md5-sparc64-y := md5_asm.o md5_glue.o 20 20 21 - aes-sparc64-y := aes_asm.o aes_glue.o crop_devid.o 22 - des-sparc64-y := des_asm.o des_glue.o crop_devid.o 23 - camellia-sparc64-y := camellia_asm.o camellia_glue.o crop_devid.o 21 + aes-sparc64-y := aes_asm.o aes_glue.o 22 + des-sparc64-y := des_asm.o des_glue.o 23 + camellia-sparc64-y := camellia_asm.o camellia_glue.o 24 24 25 - crc32c-sparc64-y := crc32c_asm.o crc32c_glue.o crop_devid.o 25 + crc32c-sparc64-y := crc32c_asm.o crc32c_glue.o
+2
arch/sparc/crypto/aes_glue.c
··· 475 475 MODULE_DESCRIPTION("AES Secure Hash Algorithm, sparc64 aes opcode accelerated"); 476 476 477 477 MODULE_ALIAS("aes"); 478 + 479 + #include "crop_devid.c"
+2
arch/sparc/crypto/camellia_glue.c
··· 320 320 MODULE_DESCRIPTION("Camellia Cipher Algorithm, sparc64 camellia opcode accelerated"); 321 321 322 322 MODULE_ALIAS("aes"); 323 + 324 + #include "crop_devid.c"
+2
arch/sparc/crypto/crc32c_glue.c
··· 177 177 MODULE_DESCRIPTION("CRC32c (Castagnoli), sparc64 crc32c opcode accelerated"); 178 178 179 179 MODULE_ALIAS("crc32c"); 180 + 181 + #include "crop_devid.c"
+2
arch/sparc/crypto/des_glue.c
··· 527 527 MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms, sparc64 des opcode accelerated"); 528 528 529 529 MODULE_ALIAS("des"); 530 + 531 + #include "crop_devid.c"
+2
arch/sparc/crypto/md5_glue.c
··· 186 186 MODULE_DESCRIPTION("MD5 Secure Hash Algorithm, sparc64 md5 opcode accelerated"); 187 187 188 188 MODULE_ALIAS("md5"); 189 + 190 + #include "crop_devid.c"
+2
arch/sparc/crypto/sha1_glue.c
··· 181 181 MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, sparc64 sha1 opcode accelerated"); 182 182 183 183 MODULE_ALIAS("sha1"); 184 + 185 + #include "crop_devid.c"
+2
arch/sparc/crypto/sha256_glue.c
··· 239 239 240 240 MODULE_ALIAS("sha224"); 241 241 MODULE_ALIAS("sha256"); 242 + 243 + #include "crop_devid.c"
+2
arch/sparc/crypto/sha512_glue.c
··· 224 224 225 225 MODULE_ALIAS("sha384"); 226 226 MODULE_ALIAS("sha512"); 227 + 228 + #include "crop_devid.c"
+3 -1
arch/sparc/include/asm/atomic_64.h
··· 1 1 /* atomic.h: Thankfully the V9 is at least reasonable for this 2 2 * stuff. 3 3 * 4 - * Copyright (C) 1996, 1997, 2000 David S. Miller (davem@redhat.com) 4 + * Copyright (C) 1996, 1997, 2000, 2012 David S. Miller (davem@redhat.com) 5 5 */ 6 6 7 7 #ifndef __ARCH_SPARC64_ATOMIC__ ··· 105 105 } 106 106 107 107 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) 108 + 109 + extern long atomic64_dec_if_positive(atomic64_t *v); 108 110 109 111 /* Atomic operations are already serializing */ 110 112 #define smp_mb__before_atomic_dec() barrier()
+59 -10
arch/sparc/include/asm/backoff.h
··· 1 1 #ifndef _SPARC64_BACKOFF_H 2 2 #define _SPARC64_BACKOFF_H 3 3 4 + /* The macros in this file implement an exponential backoff facility 5 + * for atomic operations. 6 + * 7 + * When multiple threads compete on an atomic operation, it is 8 + * possible for one thread to be continually denied a successful 9 + * completion of the compare-and-swap instruction. Heavily 10 + * threaded cpu implementations like Niagara can compound this 11 + * problem even further. 12 + * 13 + * When an atomic operation fails and needs to be retried, we spin a 14 + * certain number of times. At each subsequent failure of the same 15 + * operation we double the spin count, realizing an exponential 16 + * backoff. 17 + * 18 + * When we spin, we try to use an operation that will cause the 19 + * current cpu strand to block, and therefore make the core fully 20 + * available to any other other runnable strands. There are two 21 + * options, based upon cpu capabilities. 22 + * 23 + * On all cpus prior to SPARC-T4 we do three dummy reads of the 24 + * condition code register. Each read blocks the strand for something 25 + * between 40 and 50 cpu cycles. 26 + * 27 + * For SPARC-T4 and later we have a special "pause" instruction 28 + * available. This is implemented using writes to register %asr27. 29 + * The cpu will block the number of cycles written into the register, 30 + * unless a disrupting trap happens first. SPARC-T4 specifically 31 + * implements pause with a granularity of 8 cycles. Each strand has 32 + * an internal pause counter which decrements every 8 cycles. So the 33 + * chip shifts the %asr27 value down by 3 bits, and writes the result 34 + * into the pause counter. If a value smaller than 8 is written, the 35 + * chip blocks for 1 cycle. 36 + * 37 + * To achieve the same amount of backoff as the three %ccr reads give 38 + * on earlier chips, we shift the backoff value up by 7 bits. 
(Three 39 + * %ccr reads block for about 128 cycles, 1 << 7 == 128) We write the 40 + * whole amount we want to block into the pause register, rather than 41 + * loop writing 128 each time. 42 + */ 43 + 4 44 #define BACKOFF_LIMIT (4 * 1024) 5 45 6 46 #ifdef CONFIG_SMP ··· 51 11 #define BACKOFF_LABEL(spin_label, continue_label) \ 52 12 spin_label 53 13 54 - #define BACKOFF_SPIN(reg, tmp, label) \ 55 - mov reg, tmp; \ 56 - 88: brnz,pt tmp, 88b; \ 57 - sub tmp, 1, tmp; \ 58 - set BACKOFF_LIMIT, tmp; \ 59 - cmp reg, tmp; \ 60 - bg,pn %xcc, label; \ 61 - nop; \ 62 - ba,pt %xcc, label; \ 63 - sllx reg, 1, reg; 14 + #define BACKOFF_SPIN(reg, tmp, label) \ 15 + mov reg, tmp; \ 16 + 88: rd %ccr, %g0; \ 17 + rd %ccr, %g0; \ 18 + rd %ccr, %g0; \ 19 + .section .pause_3insn_patch,"ax";\ 20 + .word 88b; \ 21 + sllx tmp, 7, tmp; \ 22 + wr tmp, 0, %asr27; \ 23 + clr tmp; \ 24 + .previous; \ 25 + brnz,pt tmp, 88b; \ 26 + sub tmp, 1, tmp; \ 27 + set BACKOFF_LIMIT, tmp; \ 28 + cmp reg, tmp; \ 29 + bg,pn %xcc, label; \ 30 + nop; \ 31 + ba,pt %xcc, label; \ 32 + sllx reg, 1, reg; 64 33 65 34 #else 66 35
+3 -2
arch/sparc/include/asm/compat.h
··· 232 232 struct pt_regs *regs = current_thread_info()->kregs; 233 233 unsigned long usp = regs->u_regs[UREG_I6]; 234 234 235 - if (!(test_thread_flag(TIF_32BIT))) 235 + if (test_thread_64bit_stack(usp)) 236 236 usp += STACK_BIAS; 237 - else 237 + 238 + if (test_thread_flag(TIF_32BIT)) 238 239 usp &= 0xffffffffUL; 239 240 240 241 usp -= len;
+16 -1
arch/sparc/include/asm/processor_64.h
··· 196 196 #define KSTK_EIP(tsk) (task_pt_regs(tsk)->tpc) 197 197 #define KSTK_ESP(tsk) (task_pt_regs(tsk)->u_regs[UREG_FP]) 198 198 199 - #define cpu_relax() barrier() 199 + /* Please see the commentary in asm/backoff.h for a description of 200 + * what these instructions are doing and how they have been choosen. 201 + * To make a long story short, we are trying to yield the current cpu 202 + * strand during busy loops. 203 + */ 204 + #define cpu_relax() asm volatile("\n99:\n\t" \ 205 + "rd %%ccr, %%g0\n\t" \ 206 + "rd %%ccr, %%g0\n\t" \ 207 + "rd %%ccr, %%g0\n\t" \ 208 + ".section .pause_3insn_patch,\"ax\"\n\t"\ 209 + ".word 99b\n\t" \ 210 + "wr %%g0, 128, %%asr27\n\t" \ 211 + "nop\n\t" \ 212 + "nop\n\t" \ 213 + ".previous" \ 214 + ::: "memory") 200 215 201 216 /* Prefetch support. This is tuned for UltraSPARC-III and later. 202 217 * UltraSPARC-I will treat these as nops, and UltraSPARC-II has
+5
arch/sparc/include/asm/prom.h
··· 63 63 extern void irq_trans_init(struct device_node *dp); 64 64 extern char *build_path_component(struct device_node *dp); 65 65 66 + /* SPARC has a local implementation */ 67 + extern int of_address_to_resource(struct device_node *dev, int index, 68 + struct resource *r); 69 + #define of_address_to_resource of_address_to_resource 70 + 66 71 #endif /* __KERNEL__ */ 67 72 #endif /* _SPARC_PROM_H */
+5
arch/sparc/include/asm/thread_info_64.h
··· 259 259 260 260 #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) 261 261 262 + #define thread32_stack_is_64bit(__SP) (((__SP) & 0x1) != 0) 263 + #define test_thread_64bit_stack(__SP) \ 264 + ((test_thread_flag(TIF_32BIT) && !thread32_stack_is_64bit(__SP)) ? \ 265 + false : true) 266 + 262 267 #endif /* !__ASSEMBLY__ */ 263 268 264 269 #endif /* __KERNEL__ */
+16 -8
arch/sparc/include/asm/ttable.h
··· 372 372 373 373 /* Normal 32bit spill */ 374 374 #define SPILL_2_GENERIC(ASI) \ 375 - srl %sp, 0, %sp; \ 375 + and %sp, 1, %g3; \ 376 + brnz,pn %g3, (. - (128 + 4)); \ 377 + srl %sp, 0, %sp; \ 376 378 stwa %l0, [%sp + %g0] ASI; \ 377 379 mov 0x04, %g3; \ 378 380 stwa %l1, [%sp + %g3] ASI; \ ··· 400 398 stwa %i6, [%g1 + %g0] ASI; \ 401 399 stwa %i7, [%g1 + %g3] ASI; \ 402 400 saved; \ 403 - retry; nop; nop; \ 401 + retry; \ 404 402 b,a,pt %xcc, spill_fixup_dax; \ 405 403 b,a,pt %xcc, spill_fixup_mna; \ 406 404 b,a,pt %xcc, spill_fixup; 407 405 408 406 #define SPILL_2_GENERIC_ETRAP \ 409 407 etrap_user_spill_32bit: \ 410 - srl %sp, 0, %sp; \ 408 + and %sp, 1, %g3; \ 409 + brnz,pn %g3, etrap_user_spill_64bit; \ 410 + srl %sp, 0, %sp; \ 411 411 stwa %l0, [%sp + 0x00] %asi; \ 412 412 stwa %l1, [%sp + 0x04] %asi; \ 413 413 stwa %l2, [%sp + 0x08] %asi; \ ··· 431 427 ba,pt %xcc, etrap_save; \ 432 428 wrpr %g1, %cwp; \ 433 429 nop; nop; nop; nop; \ 434 - nop; nop; nop; nop; \ 430 + nop; nop; \ 435 431 ba,a,pt %xcc, etrap_spill_fixup_32bit; \ 436 432 ba,a,pt %xcc, etrap_spill_fixup_32bit; \ 437 433 ba,a,pt %xcc, etrap_spill_fixup_32bit; ··· 596 592 597 593 /* Normal 32bit fill */ 598 594 #define FILL_2_GENERIC(ASI) \ 599 - srl %sp, 0, %sp; \ 595 + and %sp, 1, %g3; \ 596 + brnz,pn %g3, (. 
- (128 + 4)); \ 597 + srl %sp, 0, %sp; \ 600 598 lduwa [%sp + %g0] ASI, %l0; \ 601 599 mov 0x04, %g2; \ 602 600 mov 0x08, %g3; \ ··· 622 616 lduwa [%g1 + %g3] ASI, %i6; \ 623 617 lduwa [%g1 + %g5] ASI, %i7; \ 624 618 restored; \ 625 - retry; nop; nop; nop; nop; \ 619 + retry; nop; nop; \ 626 620 b,a,pt %xcc, fill_fixup_dax; \ 627 621 b,a,pt %xcc, fill_fixup_mna; \ 628 622 b,a,pt %xcc, fill_fixup; 629 623 630 624 #define FILL_2_GENERIC_RTRAP \ 631 625 user_rtt_fill_32bit: \ 632 - srl %sp, 0, %sp; \ 626 + and %sp, 1, %g3; \ 627 + brnz,pn %g3, user_rtt_fill_64bit; \ 628 + srl %sp, 0, %sp; \ 633 629 lduwa [%sp + 0x00] %asi, %l0; \ 634 630 lduwa [%sp + 0x04] %asi, %l1; \ 635 631 lduwa [%sp + 0x08] %asi, %l2; \ ··· 651 643 ba,pt %xcc, user_rtt_pre_restore; \ 652 644 restored; \ 653 645 nop; nop; nop; nop; nop; \ 654 - nop; nop; nop; nop; nop; \ 646 + nop; nop; nop; \ 655 647 ba,a,pt %xcc, user_rtt_fill_fixup; \ 656 648 ba,a,pt %xcc, user_rtt_fill_fixup; \ 657 649 ba,a,pt %xcc, user_rtt_fill_fixup;
+6 -1
arch/sparc/include/uapi/asm/unistd.h
··· 405 405 #define __NR_setns 337 406 406 #define __NR_process_vm_readv 338 407 407 #define __NR_process_vm_writev 339 408 + #define __NR_kern_features 340 409 + #define __NR_kcmp 341 408 410 409 - #define NR_syscalls 340 411 + #define NR_syscalls 342 412 + 413 + /* Bitmask values returned from kern_features system call. */ 414 + #define KERN_FEATURE_MIXED_MODE_STACK 0x00000001 410 415 411 416 #ifdef __32bit_syscall_numbers__ 412 417 /* Sparc 32-bit only has the "setresuid32", "getresuid32" variants,
+7
arch/sparc/kernel/entry.h
··· 59 59 extern struct popc_6insn_patch_entry __popc_6insn_patch, 60 60 __popc_6insn_patch_end; 61 61 62 + struct pause_patch_entry { 63 + unsigned int addr; 64 + unsigned int insns[3]; 65 + }; 66 + extern struct pause_patch_entry __pause_3insn_patch, 67 + __pause_3insn_patch_end; 68 + 62 69 extern void __init per_cpu_patch(void); 63 70 extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *, 64 71 struct sun4v_1insn_patch_entry *);
+4 -2
arch/sparc/kernel/leon_kernel.c
··· 56 56 static void leon_handle_ext_irq(unsigned int irq, struct irq_desc *desc) 57 57 { 58 58 unsigned int eirq; 59 + struct irq_bucket *p; 59 60 int cpu = sparc_leon3_cpuid(); 60 61 61 62 eirq = leon_eirq_get(cpu); 62 - if ((eirq & 0x10) && irq_map[eirq]->irq) /* bit4 tells if IRQ happened */ 63 - generic_handle_irq(irq_map[eirq]->irq); 63 + p = irq_map[eirq]; 64 + if ((eirq & 0x10) && p && p->irq) /* bit4 tells if IRQ happened */ 65 + generic_handle_irq(p->irq); 64 66 } 65 67 66 68 /* The extended IRQ controller has been found, this function registers it */
+16 -6
arch/sparc/kernel/perf_event.c
··· 1762 1762 1763 1763 ufp = regs->u_regs[UREG_I6] & 0xffffffffUL; 1764 1764 do { 1765 - struct sparc_stackf32 *usf, sf; 1766 1765 unsigned long pc; 1767 1766 1768 - usf = (struct sparc_stackf32 *) ufp; 1769 - if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) 1770 - break; 1767 + if (thread32_stack_is_64bit(ufp)) { 1768 + struct sparc_stackf *usf, sf; 1771 1769 1772 - pc = sf.callers_pc; 1773 - ufp = (unsigned long)sf.fp; 1770 + ufp += STACK_BIAS; 1771 + usf = (struct sparc_stackf *) ufp; 1772 + if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) 1773 + break; 1774 + pc = sf.callers_pc & 0xffffffff; 1775 + ufp = ((unsigned long) sf.fp) & 0xffffffff; 1776 + } else { 1777 + struct sparc_stackf32 *usf, sf; 1778 + usf = (struct sparc_stackf32 *) ufp; 1779 + if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) 1780 + break; 1781 + pc = sf.callers_pc; 1782 + ufp = (unsigned long)sf.fp; 1783 + } 1774 1784 perf_callchain_store(entry, pc); 1775 1785 } while (entry->nr < PERF_MAX_STACK_DEPTH); 1776 1786 }
+23 -19
arch/sparc/kernel/process_64.c
··· 452 452 /* It's a bit more tricky when 64-bit tasks are involved... */ 453 453 static unsigned long clone_stackframe(unsigned long csp, unsigned long psp) 454 454 { 455 + bool stack_64bit = test_thread_64bit_stack(psp); 455 456 unsigned long fp, distance, rval; 456 457 457 - if (!(test_thread_flag(TIF_32BIT))) { 458 + if (stack_64bit) { 458 459 csp += STACK_BIAS; 459 460 psp += STACK_BIAS; 460 461 __get_user(fp, &(((struct reg_window __user *)psp)->ins[6])); 461 462 fp += STACK_BIAS; 463 + if (test_thread_flag(TIF_32BIT)) 464 + fp &= 0xffffffff; 462 465 } else 463 466 __get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6])); 464 467 ··· 475 472 rval = (csp - distance); 476 473 if (copy_in_user((void __user *) rval, (void __user *) psp, distance)) 477 474 rval = 0; 478 - else if (test_thread_flag(TIF_32BIT)) { 475 + else if (!stack_64bit) { 479 476 if (put_user(((u32)csp), 480 477 &(((struct reg_window32 __user *)rval)->ins[6]))) 481 478 rval = 0; ··· 510 507 511 508 flush_user_windows(); 512 509 if ((window = get_thread_wsaved()) != 0) { 513 - int winsize = sizeof(struct reg_window); 514 - int bias = 0; 515 - 516 - if (test_thread_flag(TIF_32BIT)) 517 - winsize = sizeof(struct reg_window32); 518 - else 519 - bias = STACK_BIAS; 520 - 521 510 window -= 1; 522 511 do { 523 - unsigned long sp = (t->rwbuf_stkptrs[window] + bias); 524 512 struct reg_window *rwin = &t->reg_window[window]; 513 + int winsize = sizeof(struct reg_window); 514 + unsigned long sp; 515 + 516 + sp = t->rwbuf_stkptrs[window]; 517 + 518 + if (test_thread_64bit_stack(sp)) 519 + sp += STACK_BIAS; 520 + else 521 + winsize = sizeof(struct reg_window32); 525 522 526 523 if (!copy_to_user((char __user *)sp, rwin, winsize)) { 527 524 shift_window_buffer(window, get_thread_wsaved() - 1, t); ··· 547 544 { 548 545 struct thread_info *t = current_thread_info(); 549 546 unsigned long window; 550 - int winsize = sizeof(struct reg_window); 551 - int bias = 0; 552 - 553 - if 
(test_thread_flag(TIF_32BIT)) 554 - winsize = sizeof(struct reg_window32); 555 - else 556 - bias = STACK_BIAS; 557 547 558 548 flush_user_windows(); 559 549 window = get_thread_wsaved(); ··· 554 558 if (likely(window != 0)) { 555 559 window -= 1; 556 560 do { 557 - unsigned long sp = (t->rwbuf_stkptrs[window] + bias); 558 561 struct reg_window *rwin = &t->reg_window[window]; 562 + int winsize = sizeof(struct reg_window); 563 + unsigned long sp; 564 + 565 + sp = t->rwbuf_stkptrs[window]; 566 + 567 + if (test_thread_64bit_stack(sp)) 568 + sp += STACK_BIAS; 569 + else 570 + winsize = sizeof(struct reg_window32); 559 571 560 572 if (unlikely(sp & 0x7UL)) 561 573 stack_unaligned(sp);
+2 -2
arch/sparc/kernel/ptrace_64.c
··· 151 151 { 152 152 unsigned long rw_addr = regs->u_regs[UREG_I6]; 153 153 154 - if (test_tsk_thread_flag(current, TIF_32BIT)) { 154 + if (!test_thread_64bit_stack(rw_addr)) { 155 155 struct reg_window32 win32; 156 156 int i; 157 157 ··· 176 176 { 177 177 unsigned long rw_addr = regs->u_regs[UREG_I6]; 178 178 179 - if (test_tsk_thread_flag(current, TIF_32BIT)) { 179 + if (!test_thread_64bit_stack(rw_addr)) { 180 180 struct reg_window32 win32; 181 181 int i; 182 182
+21
arch/sparc/kernel/setup_64.c
··· 316 316 } 317 317 } 318 318 319 + static void __init pause_patch(void) 320 + { 321 + struct pause_patch_entry *p; 322 + 323 + p = &__pause_3insn_patch; 324 + while (p < &__pause_3insn_patch_end) { 325 + unsigned long i, addr = p->addr; 326 + 327 + for (i = 0; i < 3; i++) { 328 + *(unsigned int *) (addr + (i * 4)) = p->insns[i]; 329 + wmb(); 330 + __asm__ __volatile__("flush %0" 331 + : : "r" (addr + (i * 4))); 332 + } 333 + 334 + p++; 335 + } 336 + } 337 + 319 338 #ifdef CONFIG_SMP 320 339 void __init boot_cpu_id_too_large(int cpu) 321 340 { ··· 547 528 548 529 if (sparc64_elf_hwcap & AV_SPARC_POPC) 549 530 popc_patch(); 531 + if (sparc64_elf_hwcap & AV_SPARC_PAUSE) 532 + pause_patch(); 550 533 } 551 534 552 535 void __init setup_arch(char **cmdline_p)
+5
arch/sparc/kernel/sys_sparc_64.c
··· 751 751 : "cc"); 752 752 return __res; 753 753 } 754 + 755 + asmlinkage long sys_kern_features(void) 756 + { 757 + return KERN_FEATURE_MIXED_MODE_STACK; 758 + }
+1
arch/sparc/kernel/systbls_32.S
··· 85 85 /*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init 86 86 /*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime 87 87 /*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev 88 + /*340*/ .long sys_ni_syscall, sys_kcmp
+2
arch/sparc/kernel/systbls_64.S
··· 86 86 .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init 87 87 /*330*/ .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime 88 88 .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev 89 + /*340*/ .word sys_kern_features, sys_kcmp 89 90 90 91 #endif /* CONFIG_COMPAT */ 91 92 ··· 164 163 .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init 165 164 /*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime 166 165 .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev 166 + /*340*/ .word sys_kern_features, sys_kcmp
+23 -13
arch/sparc/kernel/unaligned_64.c
··· 113 113 114 114 static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) 115 115 { 116 - unsigned long value; 116 + unsigned long value, fp; 117 117 118 118 if (reg < 16) 119 119 return (!reg ? 0 : regs->u_regs[reg]); 120 + 121 + fp = regs->u_regs[UREG_FP]; 122 + 120 123 if (regs->tstate & TSTATE_PRIV) { 121 124 struct reg_window *win; 122 - win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS); 125 + win = (struct reg_window *)(fp + STACK_BIAS); 123 126 value = win->locals[reg - 16]; 124 - } else if (test_thread_flag(TIF_32BIT)) { 127 + } else if (!test_thread_64bit_stack(fp)) { 125 128 struct reg_window32 __user *win32; 126 - win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); 129 + win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp)); 127 130 get_user(value, &win32->locals[reg - 16]); 128 131 } else { 129 132 struct reg_window __user *win; 130 - win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); 133 + win = (struct reg_window __user *)(fp + STACK_BIAS); 131 134 get_user(value, &win->locals[reg - 16]); 132 135 } 133 136 return value; ··· 138 135 139 136 static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs) 140 137 { 138 + unsigned long fp; 139 + 141 140 if (reg < 16) 142 141 return &regs->u_regs[reg]; 142 + 143 + fp = regs->u_regs[UREG_FP]; 144 + 143 145 if (regs->tstate & TSTATE_PRIV) { 144 146 struct reg_window *win; 145 - win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS); 147 + win = (struct reg_window *)(fp + STACK_BIAS); 146 148 return &win->locals[reg - 16]; 147 - } else if (test_thread_flag(TIF_32BIT)) { 149 + } else if (!test_thread_64bit_stack(fp)) { 148 150 struct reg_window32 *win32; 149 - win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP])); 151 + win32 = (struct reg_window32 *)((unsigned long)((u32)fp)); 150 152 return (unsigned long *)&win32->locals[reg - 16]; 151 153 } else { 152 154 struct 
reg_window *win; 153 - win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS); 155 + win = (struct reg_window *)(fp + STACK_BIAS); 154 156 return &win->locals[reg - 16]; 155 157 } 156 158 } ··· 400 392 if (rd) 401 393 regs->u_regs[rd] = ret; 402 394 } else { 403 - if (test_thread_flag(TIF_32BIT)) { 395 + unsigned long fp = regs->u_regs[UREG_FP]; 396 + 397 + if (!test_thread_64bit_stack(fp)) { 404 398 struct reg_window32 __user *win32; 405 - win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); 399 + win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp)); 406 400 put_user(ret, &win32->locals[rd - 16]); 407 401 } else { 408 402 struct reg_window __user *win; 409 - win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); 403 + win = (struct reg_window __user *)(fp + STACK_BIAS); 410 404 put_user(ret, &win->locals[rd - 16]); 411 405 } 412 406 } ··· 564 554 reg[0] = 0; 565 555 if ((insn & 0x780000) == 0x180000) 566 556 reg[1] = 0; 567 - } else if (test_thread_flag(TIF_32BIT)) { 557 + } else if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) { 568 558 put_user(0, (int __user *) reg); 569 559 if ((insn & 0x780000) == 0x180000) 570 560 put_user(0, ((int __user *) reg) + 1);
+14 -9
arch/sparc/kernel/visemul.c
··· 149 149 150 150 static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) 151 151 { 152 - unsigned long value; 152 + unsigned long value, fp; 153 153 154 154 if (reg < 16) 155 155 return (!reg ? 0 : regs->u_regs[reg]); 156 + 157 + fp = regs->u_regs[UREG_FP]; 158 + 156 159 if (regs->tstate & TSTATE_PRIV) { 157 160 struct reg_window *win; 158 - win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS); 161 + win = (struct reg_window *)(fp + STACK_BIAS); 159 162 value = win->locals[reg - 16]; 160 - } else if (test_thread_flag(TIF_32BIT)) { 163 + } else if (!test_thread_64bit_stack(fp)) { 161 164 struct reg_window32 __user *win32; 162 - win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); 165 + win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp)); 163 166 get_user(value, &win32->locals[reg - 16]); 164 167 } else { 165 168 struct reg_window __user *win; 166 - win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); 169 + win = (struct reg_window __user *)(fp + STACK_BIAS); 167 170 get_user(value, &win->locals[reg - 16]); 168 171 } 169 172 return value; ··· 175 172 static inline unsigned long __user *__fetch_reg_addr_user(unsigned int reg, 176 173 struct pt_regs *regs) 177 174 { 175 + unsigned long fp = regs->u_regs[UREG_FP]; 176 + 178 177 BUG_ON(reg < 16); 179 178 BUG_ON(regs->tstate & TSTATE_PRIV); 180 179 181 - if (test_thread_flag(TIF_32BIT)) { 180 + if (!test_thread_64bit_stack(fp)) { 182 181 struct reg_window32 __user *win32; 183 - win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); 182 + win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp)); 184 183 return (unsigned long __user *)&win32->locals[reg - 16]; 185 184 } else { 186 185 struct reg_window __user *win; 187 - win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); 186 + win = (struct reg_window __user *)(fp + STACK_BIAS); 188 187 return &win->locals[reg - 16]; 189 188 
} 190 189 } ··· 209 204 } else { 210 205 unsigned long __user *rd_user = __fetch_reg_addr_user(rd, regs); 211 206 212 - if (test_thread_flag(TIF_32BIT)) 207 + if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) 213 208 __put_user((u32)val, (u32 __user *)rd_user); 214 209 else 215 210 __put_user(val, rd_user);
+5
arch/sparc/kernel/vmlinux.lds.S
··· 132 132 *(.popc_6insn_patch) 133 133 __popc_6insn_patch_end = .; 134 134 } 135 + .pause_3insn_patch : { 136 + __pause_3insn_patch = .; 137 + *(.pause_3insn_patch) 138 + __pause_3insn_patch_end = .; 139 + } 135 140 PERCPU_SECTION(SMP_CACHE_BYTES) 136 141 137 142 . = ALIGN(PAGE_SIZE);
+2
arch/sparc/kernel/winfixup.S
··· 43 43 spill_fixup_dax: 44 44 TRAP_LOAD_THREAD_REG(%g6, %g1) 45 45 ldx [%g6 + TI_FLAGS], %g1 46 + andcc %sp, 0x1, %g0 47 + movne %icc, 0, %g1 46 48 andcc %g1, _TIF_32BIT, %g0 47 49 ldub [%g6 + TI_WSAVED], %g1 48 50 sll %g1, 3, %g3
+15 -1
arch/sparc/lib/atomic_64.S
··· 1 1 /* atomic.S: These things are too big to do inline. 2 2 * 3 - * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net) 3 + * Copyright (C) 1999, 2007 2012 David S. Miller (davem@davemloft.net) 4 4 */ 5 5 6 6 #include <linux/linkage.h> ··· 117 117 sub %g1, %o0, %o0 118 118 2: BACKOFF_SPIN(%o2, %o3, 1b) 119 119 ENDPROC(atomic64_sub_ret) 120 + 121 + ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */ 122 + BACKOFF_SETUP(%o2) 123 + 1: ldx [%o0], %g1 124 + brlez,pn %g1, 3f 125 + sub %g1, 1, %g7 126 + casx [%o0], %g1, %g7 127 + cmp %g1, %g7 128 + bne,pn %xcc, BACKOFF_LABEL(2f, 1b) 129 + nop 130 + 3: retl 131 + sub %g1, 1, %o0 132 + 2: BACKOFF_SPIN(%o2, %o3, 1b) 133 + ENDPROC(atomic64_dec_if_positive)
+1
arch/sparc/lib/ksyms.c
··· 116 116 EXPORT_SYMBOL(atomic64_add_ret); 117 117 EXPORT_SYMBOL(atomic64_sub); 118 118 EXPORT_SYMBOL(atomic64_sub_ret); 119 + EXPORT_SYMBOL(atomic64_dec_if_positive); 119 120 120 121 /* Atomic bit operations. */ 121 122 EXPORT_SYMBOL(test_and_set_bit);
+1 -1
arch/sparc/math-emu/math_64.c
··· 320 320 XR = 0; 321 321 else if (freg < 16) 322 322 XR = regs->u_regs[freg]; 323 - else if (test_thread_flag(TIF_32BIT)) { 323 + else if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) { 324 324 struct reg_window32 __user *win32; 325 325 flushw_user (); 326 326 win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
+7
drivers/base/platform.c
··· 83 83 */ 84 84 int platform_get_irq(struct platform_device *dev, unsigned int num) 85 85 { 86 + #ifdef CONFIG_SPARC 87 + /* sparc does not have irqs represented as IORESOURCE_IRQ resources */ 88 + if (!dev || num >= dev->archdata.num_irqs) 89 + return -ENXIO; 90 + return dev->archdata.irqs[num]; 91 + #else 86 92 struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num); 87 93 88 94 return r ? r->start : -ENXIO; 95 + #endif 89 96 } 90 97 EXPORT_SYMBOL_GPL(platform_get_irq); 91 98
+1 -1
drivers/gpio/Kconfig
··· 47 47 48 48 config OF_GPIO 49 49 def_bool y 50 - depends on OF && !SPARC 50 + depends on OF 51 51 52 52 config DEBUG_GPIO 53 53 bool "Debug GPIO calls"
+1 -12
drivers/scsi/qlogicpti.c
··· 1294 1294 static const struct of_device_id qpti_match[]; 1295 1295 static int __devinit qpti_sbus_probe(struct platform_device *op) 1296 1296 { 1297 - const struct of_device_id *match; 1298 - struct scsi_host_template *tpnt; 1299 1297 struct device_node *dp = op->dev.of_node; 1300 1298 struct Scsi_Host *host; 1301 1299 struct qlogicpti *qpti; 1302 1300 static int nqptis; 1303 1301 const char *fcode; 1304 - 1305 - match = of_match_device(qpti_match, &op->dev); 1306 - if (!match) 1307 - return -EINVAL; 1308 - tpnt = match->data; 1309 1302 1310 1303 /* Sometimes Antares cards come up not completely 1311 1304 * setup, and we get a report of a zero IRQ. ··· 1306 1313 if (op->archdata.irqs[0] == 0) 1307 1314 return -ENODEV; 1308 1315 1309 - host = scsi_host_alloc(tpnt, sizeof(struct qlogicpti)); 1316 + host = scsi_host_alloc(&qpti_template, sizeof(struct qlogicpti)); 1310 1317 if (!host) 1311 1318 return -ENOMEM; 1312 1319 ··· 1438 1445 static const struct of_device_id qpti_match[] = { 1439 1446 { 1440 1447 .name = "ptisp", 1441 - .data = &qpti_template, 1442 1448 }, 1443 1449 { 1444 1450 .name = "PTI,ptisp", 1445 - .data = &qpti_template, 1446 1451 }, 1447 1452 { 1448 1453 .name = "QLGC,isp", 1449 - .data = &qpti_template, 1450 1454 }, 1451 1455 { 1452 1456 .name = "SUNW,isp", 1453 - .data = &qpti_template, 1454 1457 }, 1455 1458 {}, 1456 1459 };
+2
include/linux/of_address.h
··· 28 28 #endif 29 29 30 30 #else /* CONFIG_OF_ADDRESS */ 31 + #ifndef of_address_to_resource 31 32 static inline int of_address_to_resource(struct device_node *dev, int index, 32 33 struct resource *r) 33 34 { 34 35 return -EINVAL; 35 36 } 37 + #endif 36 38 static inline struct device_node *of_find_matching_node_by_address( 37 39 struct device_node *from, 38 40 const struct of_device_id *matches,