Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus

Pull MIPS fixes from Ralf Baechle:
"Here's a final round of fixes for 4.12:

- Fix misordered instructions in assembly code making kernel startup
via UHI unreliable.

- Fix special case of MADDF and MSUBF emulation.

- Fix alignment issue in address calculation in pm-cps on 64 bit.

- Fix IRQ tracing & lockdep when rescheduling.

- Systems with MAARs require post-DMA cache flushes.

The reordering fix and the MADDF/MSUBF fix have sat in linux-next for
a number of days. The others haven't propagated from my pull tree to
linux-next yet but all have survived manual testing and Imagination's
automated test system and there are no pending bug reports"

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus:
MIPS: Avoid accidental raw backtrace
MIPS: Perform post-DMA cache flushes on systems with MAARs
MIPS: Fix IRQ tracing & lockdep when rescheduling
MIPS: pm-cps: Drop manual cache-line alignment of ready_count
MIPS: math-emu: Handle zero accumulator case in MADDF and MSUBF separately
MIPS: head: Reorder instructions missing a delay slot

+33 -16
+3
arch/mips/kernel/entry.S
··· 11 11 #include <asm/asm.h> 12 12 #include <asm/asmmacro.h> 13 13 #include <asm/compiler.h> 14 + #include <asm/irqflags.h> 14 15 #include <asm/regdef.h> 15 16 #include <asm/mipsregs.h> 16 17 #include <asm/stackframe.h> ··· 120 119 andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS 121 120 beqz t0, work_notifysig 122 121 work_resched: 122 + TRACE_IRQS_OFF 123 123 jal schedule 124 124 125 125 local_irq_disable # make sure need_resched and ··· 157 155 beqz t0, work_pending # trace bit set? 158 156 local_irq_enable # could let syscall_trace_leave() 159 157 # call schedule() instead 158 + TRACE_IRQS_ON 160 159 move a0, sp 161 160 jal syscall_trace_leave 162 161 b resume_userspace
+1 -1
arch/mips/kernel/head.S
··· 106 106 beq t0, t1, dtb_found 107 107 #endif 108 108 li t1, -2 109 - beq a0, t1, dtb_found 110 109 move t2, a1 110 + beq a0, t1, dtb_found 111 111 112 112 li t2, 0 113 113 dtb_found:
+1 -8
arch/mips/kernel/pm-cps.c
··· 56 56 * state. Actually per-core rather than per-CPU. 57 57 */ 58 58 static DEFINE_PER_CPU_ALIGNED(u32*, ready_count); 59 - static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc); 60 59 61 60 /* Indicates online CPUs coupled with the current CPU */ 62 61 static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled); ··· 641 642 { 642 643 enum cps_pm_state state; 643 644 unsigned core = cpu_data[cpu].core; 644 - unsigned dlinesz = cpu_data[cpu].dcache.linesz; 645 645 void *entry_fn, *core_rc; 646 646 647 647 for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) { ··· 660 662 } 661 663 662 664 if (!per_cpu(ready_count, core)) { 663 - core_rc = kmalloc(dlinesz * 2, GFP_KERNEL); 665 + core_rc = kmalloc(sizeof(u32), GFP_KERNEL); 664 666 if (!core_rc) { 665 667 pr_err("Failed allocate core %u ready_count\n", core); 666 668 return -ENOMEM; 667 669 } 668 - per_cpu(ready_count_alloc, core) = core_rc; 669 - 670 - /* Ensure ready_count is aligned to a cacheline boundary */ 671 - core_rc += dlinesz - 1; 672 - core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1)); 673 670 per_cpu(ready_count, core) = core_rc; 674 671 } 675 672
+2
arch/mips/kernel/traps.c
··· 201 201 { 202 202 struct pt_regs regs; 203 203 mm_segment_t old_fs = get_fs(); 204 + 205 + regs.cp0_status = KSU_KERNEL; 204 206 if (sp) { 205 207 regs.regs[29] = (unsigned long)sp; 206 208 regs.regs[31] = 0;
+4 -1
arch/mips/math-emu/dp_maddf.c
··· 54 54 return ieee754dp_nanxcpt(z); 55 55 case IEEE754_CLASS_DNORM: 56 56 DPDNORMZ; 57 - /* QNAN is handled separately below */ 57 + /* QNAN and ZERO cases are handled separately below */ 58 58 } 59 59 60 60 switch (CLPAIR(xc, yc)) { ··· 209 209 ((rm << (DP_FBITS + 1 + 3 + 1)) != 0); 210 210 } 211 211 assert(rm & (DP_HIDDEN_BIT << 3)); 212 + 213 + if (zc == IEEE754_CLASS_ZERO) 214 + return ieee754dp_format(rs, re, rm); 212 215 213 216 /* And now the addition */ 214 217 assert(zm & DP_HIDDEN_BIT);
+4 -1
arch/mips/math-emu/sp_maddf.c
··· 54 54 return ieee754sp_nanxcpt(z); 55 55 case IEEE754_CLASS_DNORM: 56 56 SPDNORMZ; 57 - /* QNAN is handled separately below */ 57 + /* QNAN and ZERO cases are handled separately below */ 58 58 } 59 59 60 60 switch (CLPAIR(xc, yc)) { ··· 202 202 ((rm << (SP_FBITS + 1 + 3 + 1)) != 0); 203 203 } 204 204 assert(rm & (SP_HIDDEN_BIT << 3)); 205 + 206 + if (zc == IEEE754_CLASS_ZERO) 207 + return ieee754sp_format(rs, re, rm); 205 208 206 209 /* And now the addition */ 207 210
+18 -5
arch/mips/mm/dma-default.c
··· 68 68 * systems and only the R10000 and R12000 are used in such systems, the 69 69 * SGI IP28 Indigo² rsp. SGI IP32 aka O2. 70 70 */ 71 - static inline int cpu_needs_post_dma_flush(struct device *dev) 71 + static inline bool cpu_needs_post_dma_flush(struct device *dev) 72 72 { 73 - return !plat_device_is_coherent(dev) && 74 - (boot_cpu_type() == CPU_R10000 || 75 - boot_cpu_type() == CPU_R12000 || 76 - boot_cpu_type() == CPU_BMIPS5000); 73 + if (plat_device_is_coherent(dev)) 74 + return false; 75 + 76 + switch (boot_cpu_type()) { 77 + case CPU_R10000: 78 + case CPU_R12000: 79 + case CPU_BMIPS5000: 80 + return true; 81 + 82 + default: 83 + /* 84 + * Presence of MAARs suggests that the CPU supports 85 + * speculatively prefetching data, and therefore requires 86 + * the post-DMA flush/invalidate. 87 + */ 88 + return cpu_has_maar; 89 + } 77 90 } 78 91 79 92 static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)