Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

arm64: futex: Refactor futex atomic operation

Refactor the futex atomic operations using ll/sc instructions in
preparation for FEAT_LSUI support. In addition, use named operands for
the inline asm.

No functional change.

Signed-off-by: Yeoreum Yun <yeoreum.yun@arm.com>
[catalin.marinas@arm.com: remove unnecessary stringify.h include]
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>

authored by

Yeoreum Yun and committed by
Catalin Marinas
eaa3babc 42550d7d

+97 -58
+97 -58
arch/arm64/include/asm/futex.h
··· 12 12 13 13 #define FUTEX_MAX_LOOPS 128 /* What's the largest number you can think of? */ 14 14 15 - #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \ 16 - do { \ 15 + #define LLSC_FUTEX_ATOMIC_OP(op, insn) \ 16 + static __always_inline int \ 17 + __llsc_futex_atomic_##op(int oparg, u32 __user *uaddr, int *oval) \ 18 + { \ 17 19 unsigned int loops = FUTEX_MAX_LOOPS; \ 20 + int ret, oldval, newval; \ 18 21 \ 19 22 uaccess_enable_privileged(); \ 20 - asm volatile( \ 21 - " prfm pstl1strm, %2\n" \ 22 - "1: ldxr %w1, %2\n" \ 23 + asm volatile("// __llsc_futex_atomic_" #op "\n" \ 24 + " prfm pstl1strm, %[uaddr]\n" \ 25 + "1: ldxr %w[oldval], %[uaddr]\n" \ 23 26 insn "\n" \ 24 - "2: stlxr %w0, %w3, %2\n" \ 25 - " cbz %w0, 3f\n" \ 26 - " sub %w4, %w4, %w0\n" \ 27 - " cbnz %w4, 1b\n" \ 28 - " mov %w0, %w6\n" \ 27 + "2: stlxr %w[ret], %w[newval], %[uaddr]\n" \ 28 + " cbz %w[ret], 3f\n" \ 29 + " sub %w[loops], %w[loops], %w[ret]\n" \ 30 + " cbnz %w[loops], 1b\n" \ 31 + " mov %w[ret], %w[err]\n" \ 29 32 "3:\n" \ 30 33 " dmb ish\n" \ 31 - _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %w0) \ 32 - _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %w0) \ 33 - : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp), \ 34 - "+r" (loops) \ 35 - : "r" (oparg), "Ir" (-EAGAIN) \ 34 + _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %w[ret]) \ 35 + _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %w[ret]) \ 36 + : [ret] "=&r" (ret), [oldval] "=&r" (oldval), \ 37 + [uaddr] "+Q" (*uaddr), [newval] "=&r" (newval), \ 38 + [loops] "+r" (loops) \ 39 + : [oparg] "r" (oparg), [err] "Ir" (-EAGAIN) \ 36 40 : "memory"); \ 37 41 uaccess_disable_privileged(); \ 38 - } while (0) 42 + \ 43 + if (!ret) \ 44 + *oval = oldval; \ 45 + \ 46 + return ret; \ 47 + } 48 + 49 + LLSC_FUTEX_ATOMIC_OP(add, "add %w[newval], %w[oldval], %w[oparg]") 50 + LLSC_FUTEX_ATOMIC_OP(or, "orr %w[newval], %w[oldval], %w[oparg]") 51 + LLSC_FUTEX_ATOMIC_OP(and, "and %w[newval], %w[oldval], %w[oparg]") 52 + LLSC_FUTEX_ATOMIC_OP(eor, "eor %w[newval], %w[oldval], %w[oparg]") 53 + LLSC_FUTEX_ATOMIC_OP(set, "mov %w[newval], %w[oparg]") 54 + 55 + static __always_inline int 56 + __llsc_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval) 57 + { 58 + int ret = 0; 59 + unsigned int loops = FUTEX_MAX_LOOPS; 60 + u32 val, tmp; 61 + 62 + uaccess_enable_privileged(); 63 + asm volatile("//__llsc_futex_cmpxchg\n" 64 + " prfm pstl1strm, %[uaddr]\n" 65 + "1: ldxr %w[curval], %[uaddr]\n" 66 + " eor %w[tmp], %w[curval], %w[oldval]\n" 67 + " cbnz %w[tmp], 4f\n" 68 + "2: stlxr %w[tmp], %w[newval], %[uaddr]\n" 69 + " cbz %w[tmp], 3f\n" 70 + " sub %w[loops], %w[loops], %w[tmp]\n" 71 + " cbnz %w[loops], 1b\n" 72 + " mov %w[ret], %w[err]\n" 73 + "3:\n" 74 + " dmb ish\n" 75 + "4:\n" 76 + _ASM_EXTABLE_UACCESS_ERR(1b, 4b, %w[ret]) 77 + _ASM_EXTABLE_UACCESS_ERR(2b, 4b, %w[ret]) 78 + : [ret] "+r" (ret), [curval] "=&r" (val), 79 + [uaddr] "+Q" (*uaddr), [tmp] "=&r" (tmp), 80 + [loops] "+r" (loops) 81 + : [oldval] "r" (oldval), [newval] "r" (newval), 82 + [err] "Ir" (-EAGAIN) 83 + : "memory"); 84 + uaccess_disable_privileged(); 85 + 86 + if (!ret) 87 + *oval = val; 88 + 89 + return ret; 90 + } 91 + 92 + #define FUTEX_ATOMIC_OP(op) \ 93 + static __always_inline int \ 94 + __futex_atomic_##op(int oparg, u32 __user *uaddr, int *oval) \ 95 + { \ 96 + return __llsc_futex_atomic_##op(oparg, uaddr, oval); \ 97 + } 98 + 99 + FUTEX_ATOMIC_OP(add) 100 + FUTEX_ATOMIC_OP(or) 101 + FUTEX_ATOMIC_OP(and) 102 + FUTEX_ATOMIC_OP(eor) 103 + FUTEX_ATOMIC_OP(set) 104 + 105 + static __always_inline int 106 + __futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval) 107 + { 108 + return __llsc_futex_cmpxchg(uaddr, oldval, newval, oval); 109 + } 39 110 40 111 static inline int 41 112 arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr) 42 113 { 43 - int oldval = 0, ret, tmp; 44 - u32 __user *uaddr = __uaccess_mask_ptr(_uaddr); 114 + int ret; 115 + u32 __user *uaddr; 45 116 46 117 if (!access_ok(_uaddr, sizeof(u32)))
return -EFAULT; 48 119 120 + uaddr = __uaccess_mask_ptr(_uaddr); 121 + 49 122 switch (op) { 50 123 case FUTEX_OP_SET: 51 - __futex_atomic_op("mov %w3, %w5", 52 - ret, oldval, uaddr, tmp, oparg); 124 + ret = __futex_atomic_set(oparg, uaddr, oval); 53 125 break; 54 126 case FUTEX_OP_ADD: 55 - __futex_atomic_op("add %w3, %w1, %w5", 56 - ret, oldval, uaddr, tmp, oparg); 127 + ret = __futex_atomic_add(oparg, uaddr, oval); 57 128 break; 58 129 case FUTEX_OP_OR: 59 - __futex_atomic_op("orr %w3, %w1, %w5", 60 - ret, oldval, uaddr, tmp, oparg); 130 + ret = __futex_atomic_or(oparg, uaddr, oval); 61 131 break; 62 132 case FUTEX_OP_ANDN: 63 - __futex_atomic_op("and %w3, %w1, %w5", 64 - ret, oldval, uaddr, tmp, ~oparg); 133 + ret = __futex_atomic_and(~oparg, uaddr, oval); 65 134 break; 66 135 case FUTEX_OP_XOR: 67 - __futex_atomic_op("eor %w3, %w1, %w5", 68 - ret, oldval, uaddr, tmp, oparg); 136 + ret = __futex_atomic_eor(oparg, uaddr, oval); 69 137 break; 70 138 default: 71 139 ret = -ENOSYS; 72 140 } 73 - 74 - if (!ret) 75 - *oval = oldval; 76 141 77 142 return ret; 78 143 } ··· 146 81 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr, 147 82 u32 oldval, u32 newval) 148 83 { 149 - int ret = 0; 150 - unsigned int loops = FUTEX_MAX_LOOPS; 151 - u32 val, tmp; 152 84 u32 __user *uaddr; 153 85 154 86 if (!access_ok(_uaddr, sizeof(u32))) 155 87 return -EFAULT; 156 88 157 89 uaddr = __uaccess_mask_ptr(_uaddr); 158 - uaccess_enable_privileged(); 159 - asm volatile("// futex_atomic_cmpxchg_inatomic\n" 160 - " prfm pstl1strm, %2\n" 161 - "1: ldxr %w1, %2\n" 162 - " sub %w3, %w1, %w5\n" 163 - " cbnz %w3, 4f\n" 164 - "2: stlxr %w3, %w6, %2\n" 165 - " cbz %w3, 3f\n" 166 - " sub %w4, %w4, %w3\n" 167 - " cbnz %w4, 1b\n" 168 - " mov %w0, %w7\n" 169 - "3:\n" 170 - " dmb ish\n" 171 - "4:\n" 172 - _ASM_EXTABLE_UACCESS_ERR(1b, 4b, %w0) 173 - _ASM_EXTABLE_UACCESS_ERR(2b, 4b, %w0) 174 - : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops) 175 - : "r" (oldval), "r" (newval), "Ir" (-EAGAIN) 176 - : "memory"); 177 - uaccess_disable_privileged(); 178 90 179 - if (!ret) 180 - *uval = val; 181 - 182 - return ret; 91 + return __futex_cmpxchg(uaddr, oldval, newval, uval); 183 92 } 184 93 185 94 #endif /* __ASM_FUTEX_H */