Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'x86/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Thomas Gleixner:
"A small set of fixes for x86:

- Add missing instruction suffixes to assembly code so it can be
compiled by newer GAS versions without warnings.

- Switch refcount WARN exceptions to UD2 as we did in general

- Make the reboot on Intel Edison platforms work

- A small documentation update so text and sample command match"

* 'x86/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
Documentation, x86, resctrl: Make text and sample command match
x86/platform/intel-mid: Handle Intel Edison reboot correctly
x86/asm: Add instruction suffixes to bitops
x86/entry/64: Add instruction suffix
x86/refcounts: Switch to UD2 for exceptions

+21 -18
+1 -1
Documentation/x86/intel_rdt_ui.txt
··· 671 671 # mkdir p1 672 672 673 673 Move the cpus 4-7 over to p1 674 - # echo f0 > p0/cpus 674 + # echo f0 > p1/cpus 675 675 676 676 View the llc occupancy snapshot 677 677
+1 -1
arch/x86/entry/entry_64.S
··· 55 55 56 56 .macro TRACE_IRQS_FLAGS flags:req 57 57 #ifdef CONFIG_TRACE_IRQFLAGS 58 - bt $9, \flags /* interrupts off? */ 58 + btl $9, \flags /* interrupts off? */ 59 59 jnc 1f 60 60 TRACE_IRQS_ON 61 61 1:
+16 -13
arch/x86/include/asm/bitops.h
··· 78 78 : "iq" ((u8)CONST_MASK(nr)) 79 79 : "memory"); 80 80 } else { 81 - asm volatile(LOCK_PREFIX "bts %1,%0" 81 + asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0" 82 82 : BITOP_ADDR(addr) : "Ir" (nr) : "memory"); 83 83 } 84 84 } ··· 94 94 */ 95 95 static __always_inline void __set_bit(long nr, volatile unsigned long *addr) 96 96 { 97 - asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory"); 97 + asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory"); 98 98 } 99 99 100 100 /** ··· 115 115 : CONST_MASK_ADDR(nr, addr) 116 116 : "iq" ((u8)~CONST_MASK(nr))); 117 117 } else { 118 - asm volatile(LOCK_PREFIX "btr %1,%0" 118 + asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0" 119 119 : BITOP_ADDR(addr) 120 120 : "Ir" (nr)); 121 121 } ··· 137 137 138 138 static __always_inline void __clear_bit(long nr, volatile unsigned long *addr) 139 139 { 140 - asm volatile("btr %1,%0" : ADDR : "Ir" (nr)); 140 + asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr)); 141 141 } 142 142 143 143 static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr) ··· 182 182 */ 183 183 static __always_inline void __change_bit(long nr, volatile unsigned long *addr) 184 184 { 185 - asm volatile("btc %1,%0" : ADDR : "Ir" (nr)); 185 + asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr)); 186 186 } 187 187 188 188 /** ··· 201 201 : CONST_MASK_ADDR(nr, addr) 202 202 : "iq" ((u8)CONST_MASK(nr))); 203 203 } else { 204 - asm volatile(LOCK_PREFIX "btc %1,%0" 204 + asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0" 205 205 : BITOP_ADDR(addr) 206 206 : "Ir" (nr)); 207 207 } ··· 217 217 */ 218 218 static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr) 219 219 { 220 - GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c); 220 + GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), 221 + *addr, "Ir", nr, "%0", c); 221 222 } 222 223 223 224 /** ··· 247 246 { 248 247 bool oldbit; 249 248 250 - asm("bts %2,%1" 249 + asm(__ASM_SIZE(bts) " %2,%1" 251 250 CC_SET(c) 252 251 : CC_OUT(c) (oldbit), ADDR 253 252 : "Ir" (nr)); ··· 264 263 */ 265 264 static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr) 266 265 { 267 - GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c); 266 + GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), 267 + *addr, "Ir", nr, "%0", c); 268 268 } 269 269 270 270 /** ··· 288 286 { 289 287 bool oldbit; 290 288 291 - asm volatile("btr %2,%1" 289 + asm volatile(__ASM_SIZE(btr) " %2,%1" 292 290 CC_SET(c) 293 291 : CC_OUT(c) (oldbit), ADDR 294 292 : "Ir" (nr)); ··· 300 298 { 301 299 bool oldbit; 302 300 303 - asm volatile("btc %2,%1" 301 + asm volatile(__ASM_SIZE(btc) " %2,%1" 304 302 CC_SET(c) 305 303 : CC_OUT(c) (oldbit), ADDR 306 304 : "Ir" (nr) : "memory"); ··· 318 316 */ 319 317 static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr) 320 318 { 321 - GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c); 319 + GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), 320 + *addr, "Ir", nr, "%0", c); 322 321 } 323 322 324 323 static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr) ··· 332 329 { 333 330 bool oldbit; 334 331 335 - asm volatile("bt %2,%1" 332 + asm volatile(__ASM_SIZE(bt) " %2,%1" 336 333 CC_SET(c) 337 334 : CC_OUT(c) (oldbit) 338 335 : "m" (*(unsigned long *)addr), "Ir" (nr));
+1 -1
arch/x86/include/asm/percpu.h
··· 526 526 { 527 527 bool oldbit; 528 528 529 - asm volatile("bt "__percpu_arg(2)",%1" 529 + asm volatile("btl "__percpu_arg(2)",%1" 530 530 CC_SET(c) 531 531 : CC_OUT(c) (oldbit) 532 532 : "m" (*(unsigned long __percpu *)addr), "Ir" (nr));
+1 -1
arch/x86/include/asm/refcount.h
··· 17 17 #define _REFCOUNT_EXCEPTION \ 18 18 ".pushsection .text..refcount\n" \ 19 19 "111:\tlea %[counter], %%" _ASM_CX "\n" \ 20 - "112:\t" ASM_UD0 "\n" \ 20 + "112:\t" ASM_UD2 "\n" \ 21 21 ASM_UNREACHABLE \ 22 22 ".popsection\n" \ 23 23 "113:\n" \
+1 -1
arch/x86/platform/intel-mid/intel-mid.c
··· 79 79 80 80 static void intel_mid_reboot(void) 81 81 { 82 - intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0); 82 + intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0); 83 83 } 84 84 85 85 static unsigned long __init intel_mid_calibrate_tsc(void)