Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus

* 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus:
[MIPS] vmlinux.lds.S: handle .text.*
[MIPS] Fix potential latency problem due to non-atomic cpu_wait.
[MIPS] SMTC: Clear TIF_FPUBOUND on clone / fork.
[MIPS] Fix 64-bit IP checksum code

+75 -24
+2 -14
arch/mips/kernel/cpu-probe.c
··· 45 45 local_irq_enable(); 46 46 } 47 47 48 - /* 49 - * There is a race when WAIT instruction executed with interrupt 50 - * enabled. 51 - * But it is implementation-dependent wheter the pipelie restarts when 52 - * a non-enabled interrupt is requested. 53 - */ 54 - static void r4k_wait(void) 55 - { 56 - __asm__(" .set mips3 \n" 57 - " wait \n" 58 - " .set mips0 \n"); 59 - } 48 + extern void r4k_wait(void); 60 49 61 50 /* 62 51 * This variant is preferable as it allows testing need_resched and going to ··· 117 128 118 129 __setup("nowait", wait_disable); 119 130 120 - static inline void check_wait(void) 131 + void __init check_wait(void) 121 132 { 122 133 struct cpuinfo_mips *c = &current_cpu_data; 123 134 ··· 231 242 232 243 void __init check_bugs32(void) 233 244 { 234 - check_wait(); 235 245 check_errata(); 236 246 } 237 247
+37
arch/mips/kernel/genex.S
··· 20 20 #include <asm/stackframe.h> 21 21 #include <asm/war.h> 22 22 #include <asm/page.h> 23 + #include <asm/thread_info.h> 23 24 24 25 #define PANIC_PIC(msg) \ 25 26 .set push; \ ··· 127 126 128 127 __FINIT 129 128 129 + .align 5 /* 32 byte rollback region */ 130 + LEAF(r4k_wait) 131 + .set push 132 + .set noreorder 133 + /* start of rollback region */ 134 + LONG_L t0, TI_FLAGS($28) 135 + nop 136 + andi t0, _TIF_NEED_RESCHED 137 + bnez t0, 1f 138 + nop 139 + nop 140 + nop 141 + .set mips3 142 + wait 143 + /* end of rollback region (the region size must be power of two) */ 144 + .set pop 145 + 1: 146 + jr ra 147 + END(r4k_wait) 148 + 149 + .macro BUILD_ROLLBACK_PROLOGUE handler 150 + FEXPORT(rollback_\handler) 151 + .set push 152 + .set noat 153 + MFC0 k0, CP0_EPC 154 + PTR_LA k1, r4k_wait 155 + ori k0, 0x1f /* 32 byte rollback region */ 156 + xori k0, 0x1f 157 + bne k0, k1, 9f 158 + MTC0 k0, CP0_EPC 159 + 9: 160 + .set pop 161 + .endm 162 + 130 163 .align 5 164 + BUILD_ROLLBACK_PROLOGUE handle_int 131 165 NESTED(handle_int, PT_SIZE, sp) 132 166 #ifdef CONFIG_TRACE_IRQFLAGS 133 167 /* ··· 237 201 * This prototype is copied to ebase + n*IntCtl.VS and patched 238 202 * to invoke the handler 239 203 */ 204 + BUILD_ROLLBACK_PROLOGUE except_vec_vi 240 205 NESTED(except_vec_vi, 0, sp) 241 206 SAVE_SOME 242 207 SAVE_AT
+2
arch/mips/kernel/process.c
··· 148 148 clear_tsk_thread_flag(p, TIF_USEDFPU); 149 149 150 150 #ifdef CONFIG_MIPS_MT_FPAFF 151 + clear_tsk_thread_flag(p, TIF_FPUBOUND); 152 + 151 153 /* 152 154 * FPU affinity support is cleaner if we track the 153 155 * user-visible CPU affinity from the very beginning.
+16 -6
arch/mips/kernel/traps.c
··· 46 46 #include <asm/types.h> 47 47 #include <asm/stacktrace.h> 48 48 49 + extern void check_wait(void); 50 + extern asmlinkage void r4k_wait(void); 51 + extern asmlinkage void rollback_handle_int(void); 49 52 extern asmlinkage void handle_int(void); 50 53 extern asmlinkage void handle_tlbm(void); 51 54 extern asmlinkage void handle_tlbl(void); ··· 1254 1251 1255 1252 extern char except_vec_vi, except_vec_vi_lui; 1256 1253 extern char except_vec_vi_ori, except_vec_vi_end; 1254 + extern char rollback_except_vec_vi; 1255 + char *vec_start = (cpu_wait == r4k_wait) ? 1256 + &rollback_except_vec_vi : &except_vec_vi; 1257 1257 #ifdef CONFIG_MIPS_MT_SMTC 1258 1258 /* 1259 1259 * We need to provide the SMTC vectored interrupt handler ··· 1264 1258 * Status.IM bit to be masked before going there. 1265 1259 */ 1266 1260 extern char except_vec_vi_mori; 1267 - const int mori_offset = &except_vec_vi_mori - &except_vec_vi; 1261 + const int mori_offset = &except_vec_vi_mori - vec_start; 1268 1262 #endif /* CONFIG_MIPS_MT_SMTC */ 1269 - const int handler_len = &except_vec_vi_end - &except_vec_vi; 1270 - const int lui_offset = &except_vec_vi_lui - &except_vec_vi; 1271 - const int ori_offset = &except_vec_vi_ori - &except_vec_vi; 1263 + const int handler_len = &except_vec_vi_end - vec_start; 1264 + const int lui_offset = &except_vec_vi_lui - vec_start; 1265 + const int ori_offset = &except_vec_vi_ori - vec_start; 1272 1266 1273 1267 if (handler_len > VECTORSPACING) { 1274 1268 /* ··· 1278 1272 panic("VECTORSPACING too small"); 1279 1273 } 1280 1274 1281 - memcpy(b, &except_vec_vi, handler_len); 1275 + memcpy(b, vec_start, handler_len); 1282 1276 #ifdef CONFIG_MIPS_MT_SMTC 1283 1277 BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */ 1284 1278
+1
arch/mips/kernel/vmlinux.lds.S
··· 36 36 SCHED_TEXT 37 37 LOCK_TEXT 38 38 KPROBES_TEXT 39 + *(.text.*) 39 40 *(.fixup) 40 41 *(.gnu.warning) 41 42 } :text = 0
+17 -4
arch/mips/lib/csum_partial.S
··· 39 39 #ifdef USE_DOUBLE 40 40 41 41 #define LOAD ld 42 + #define LOAD32 lwu 42 43 #define ADD daddu 43 44 #define NBYTES 8 44 45 45 46 #else 46 47 47 48 #define LOAD lw 49 + #define LOAD32 lw 48 50 #define ADD addu 49 51 #define NBYTES 4 50 52 ··· 60 58 ADD sum, reg; \ 61 59 sltu v1, sum, reg; \ 62 60 ADD sum, v1; \ 61 + .set pop 62 + 63 + #define ADDC32(sum,reg) \ 64 + .set push; \ 65 + .set noat; \ 66 + addu sum, reg; \ 67 + sltu v1, sum, reg; \ 68 + addu sum, v1; \ 63 69 .set pop 64 70 65 71 #define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3) \ ··· 142 132 beqz t8, .Lqword_align 143 133 andi t8, src, 0x8 144 134 145 - lw t0, 0x00(src) 135 + LOAD32 t0, 0x00(src) 146 136 LONG_SUBU a1, a1, 0x4 147 137 ADDC(sum, t0) 148 138 PTR_ADDU src, src, 0x4 ··· 221 211 LONG_SRL t8, t8, 0x2 222 212 223 213 .Lend_words: 224 - lw t0, (src) 214 + LOAD32 t0, (src) 225 215 LONG_SUBU t8, t8, 0x1 226 216 ADDC(sum, t0) 227 217 .set reorder /* DADDI_WAR */ ··· 240 230 /* Still a full word to go */ 241 231 ulw t1, (src) 242 232 PTR_ADDIU src, 4 233 + #ifdef USE_DOUBLE 234 + dsll t1, t1, 32 /* clear lower 32bit */ 235 + #endif 243 236 ADDC(sum, t1) 244 237 245 238 1: move t1, zero ··· 293 280 1: 294 281 .set reorder 295 282 /* Add the passed partial csum. */ 296 - ADDC(sum, a2) 283 + ADDC32(sum, a2) 297 284 jr ra 298 285 .set noreorder 299 286 END(csum_partial) ··· 694 681 .set pop 695 682 1: 696 683 .set reorder 697 - ADDC(sum, psum) 684 + ADDC32(sum, psum) 698 685 jr ra 699 686 .set noreorder 700 687