Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'riscv-for-linus-4.15-rc2_cleanups' of git://git.kernel.org/pub/scm/linux/kernel/git/palmer/linux

Pull RISC-V cleanups and ABI fixes from Palmer Dabbelt:
"This contains a handful of small cleanups that are a result of
feedback that didn't make it into our original patch set, either
because the feedback hadn't been given yet, I missed the original
emails, or we weren't ready to submit the changes yet.

I've been maintaining the various cleanup patch sets I have as their
own branches, which I then merged together and signed. Each merge
commit has a short summary of the changes, and each branch is based on
your latest tag (4.15-rc1, in this case). If this isn't the right way
to do this then feel free to suggest something else, but it seems sane
to me.

Here's a short summary of the changes, roughly in order of how
interesting they are.

- libgcc.h has been moved from include/lib, where it was the only
member, to include/linux. This is meant to avoid tab-completion
conflicts.

- VDSO entries for clock_get{res,time}, gettimeofday, and getcpu have
been added. These are simple syscalls now, but we want to let glibc
use them from the start so we can make them faster later (a short
usage sketch follows this summary).

- A VDSO entry for instruction cache flushing has been added, so
userspace can flush the instruction cache without needing to know
which harts its threads run on.

- The VDSO symbol versions for __vdso_cmpxchg{32,64} have been
removed, as those VDSO entries don't actually exist.

- __io_writes has been corrected to respect the given type.

- A new READ_ONCE in arch_spin_is_locked().

- __test_and_op_bit_ord() is now actually ordered.

- Various small fixes throughout the tree to enable allmodconfig to
build cleanly.

- Removal of some dead code in our atomic support headers.

- Improvements to various comments in our atomic support headers"

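As a concrete illustration of the vDSO plumbing above, here is a minimal
user-space sketch (an editor's addition, not part of the patch) of the
calls these entries back: glibc routes clock_gettime() through
__vdso_clock_gettime when the kernel exports it, so exposing the symbols
as "simple syscalls" today still reserves the fast path for tomorrow.

#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec ts;

        /* Resolves through the vDSO when available; otherwise this
         * falls back to the real syscall with identical semantics. */
        if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
                printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
        return 0;
}
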
* tag 'riscv-for-linus-4.15-rc2_cleanups' of git://git.kernel.org/pub/scm/linux/kernel/git/palmer/linux: (23 commits)
RISC-V: __io_writes should respect the length argument
move libgcc.h to include/linux
RISC-V: Clean up an unused include
RISC-V: Allow userspace to flush the instruction cache
RISC-V: Flush I$ when making a dirty page executable
RISC-V: Add missing include
RISC-V: Use define for get_cycles like other architectures
RISC-V: Provide stub of setup_profiling_timer()
RISC-V: Export some expected symbols for modules
RISC-V: move empty_zero_page definition to C and export it
RISC-V: io.h: type fixes for warnings
RISC-V: use RISCV_{INT,SHORT} instead of {INT,SHORT} for asm macros
RISC-V: use generic serial.h
RISC-V: remove spin_unlock_wait()
RISC-V: `sfence.vma` orders the instruction cache
RISC-V: Add READ_ONCE in arch_spin_is_locked()
RISC-V: __test_and_op_bit_ord should be strongly ordered
RISC-V: Remove smb_mb__{before,after}_spinlock()
RISC-V: Remove __smp_bp__{before,after}_atomic
RISC-V: Comment on why {,cmp}xchg is ordered how it is
...

+498 -146
+1
arch/riscv/include/asm/Kbuild
···
  generic-y += scatterlist.h
  generic-y += sections.h
  generic-y += sembuf.h
+ generic-y += serial.h
  generic-y += setup.h
  generic-y += shmbuf.h
  generic-y += shmparam.h
+6 -6
arch/riscv/include/asm/asm.h
···
  #endif

  #if (__SIZEOF_INT__ == 4)
- #define INT		__ASM_STR(.word)
- #define SZINT		__ASM_STR(4)
- #define LGINT		__ASM_STR(2)
+ #define RISCV_INT	__ASM_STR(.word)
+ #define RISCV_SZINT	__ASM_STR(4)
+ #define RISCV_LGINT	__ASM_STR(2)
  #else
  #error "Unexpected __SIZEOF_INT__"
  #endif

  #if (__SIZEOF_SHORT__ == 2)
- #define SHORT		__ASM_STR(.half)
- #define SZSHORT	__ASM_STR(2)
- #define LGSHORT	__ASM_STR(1)
+ #define RISCV_SHORT	__ASM_STR(.half)
+ #define RISCV_SZSHORT	__ASM_STR(2)
+ #define RISCV_LGSHORT	__ASM_STR(1)
  #else
  #error "Unexpected __SIZEOF_SHORT__"
  #endif
+54 -49
arch/riscv/include/asm/atomic.h
···
   * have the AQ or RL bits set.  These don't return anything, so there's only
   * one version to worry about.
   */
- #define ATOMIC_OP(op, asm_op, c_op, I, asm_type, c_type, prefix) \
+ #define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix) \
  static __always_inline void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
  { \
  	__asm__ __volatile__ ( \
  		"amo" #asm_op "." #asm_type " zero, %1, %0" \
  		: "+A" (v->counter) \
  		: "r" (I) \
  		: "memory"); \
  }

  #ifdef CONFIG_GENERIC_ATOMIC64
- #define ATOMIC_OPS(op, asm_op, c_op, I) \
-         ATOMIC_OP (op, asm_op, c_op, I, w, int,   )
+ #define ATOMIC_OPS(op, asm_op, I) \
+         ATOMIC_OP (op, asm_op, I, w, int,   )
  #else
- #define ATOMIC_OPS(op, asm_op, c_op, I) \
-         ATOMIC_OP (op, asm_op, c_op, I, w, int,   ) \
-         ATOMIC_OP (op, asm_op, c_op, I, d, long, 64)
+ #define ATOMIC_OPS(op, asm_op, I) \
+         ATOMIC_OP (op, asm_op, I, w, int,   ) \
+         ATOMIC_OP (op, asm_op, I, d, long, 64)
  #endif

- ATOMIC_OPS(add, add, +,  i)
- ATOMIC_OPS(sub, add, +, -i)
- ATOMIC_OPS(and, and, &,  i)
- ATOMIC_OPS( or,  or, |,  i)
- ATOMIC_OPS(xor, xor, ^,  i)
+ ATOMIC_OPS(add, add,  i)
+ ATOMIC_OPS(sub, add, -i)
+ ATOMIC_OPS(and, and,  i)
+ ATOMIC_OPS( or,  or,  i)
+ ATOMIC_OPS(xor, xor,  i)

  #undef ATOMIC_OP
  #undef ATOMIC_OPS
···
   * There's two flavors of these: the arithmatic ops have both fetch and return
   * versions, while the logical ops only have fetch versions.
   */
- #define ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, asm_type, c_type, prefix) \
+ #define ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, asm_type, c_type, prefix) \
  static __always_inline c_type atomic##prefix##_fetch_##op##c_or(c_type i, atomic##prefix##_t *v) \
  { \
  	register c_type ret; \
···
  #ifdef CONFIG_GENERIC_ATOMIC64
  #define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \
-         ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, w, int,   ) \
+         ATOMIC_FETCH_OP (op, asm_op, I, asm_or, c_or, w, int,   ) \
          ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w, int,   )
  #else
  #define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \
-         ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, w, int,   ) \
+         ATOMIC_FETCH_OP (op, asm_op, I, asm_or, c_or, w, int,   ) \
          ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, w, int,   ) \
-         ATOMIC_FETCH_OP (op, asm_op, c_op, I, asm_or, c_or, d, long, 64) \
+         ATOMIC_FETCH_OP (op, asm_op, I, asm_or, c_or, d, long, 64) \
          ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_or, c_or, d, long, 64)
  #endif
···
  #undef ATOMIC_OPS

  #ifdef CONFIG_GENERIC_ATOMIC64
- #define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \
-         ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, w, int,   )
+ #define ATOMIC_OPS(op, asm_op, I, asm_or, c_or) \
+         ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, w, int,   )
  #else
- #define ATOMIC_OPS(op, asm_op, c_op, I, asm_or, c_or) \
-         ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, w, int,   ) \
-         ATOMIC_FETCH_OP(op, asm_op, c_op, I, asm_or, c_or, d, long, 64)
+ #define ATOMIC_OPS(op, asm_op, I, asm_or, c_or) \
+         ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, w, int,   ) \
+         ATOMIC_FETCH_OP(op, asm_op, I, asm_or, c_or, d, long, 64)
  #endif

- ATOMIC_OPS(and, and, &,  i,      , _relaxed)
- ATOMIC_OPS(and, and, &,  i, .aq  , _acquire)
- ATOMIC_OPS(and, and, &,  i, .rl  , _release)
- ATOMIC_OPS(and, and, &,  i, .aqrl,         )
+ ATOMIC_OPS(and, and,  i,      , _relaxed)
+ ATOMIC_OPS(and, and,  i, .aq  , _acquire)
+ ATOMIC_OPS(and, and,  i, .rl  , _release)
+ ATOMIC_OPS(and, and,  i, .aqrl,         )

- ATOMIC_OPS( or,  or, |,  i,      , _relaxed)
- ATOMIC_OPS( or,  or, |,  i, .aq  , _acquire)
- ATOMIC_OPS( or,  or, |,  i, .rl  , _release)
- ATOMIC_OPS( or,  or, |,  i, .aqrl,         )
+ ATOMIC_OPS( or,  or,  i,      , _relaxed)
+ ATOMIC_OPS( or,  or,  i, .aq  , _acquire)
+ ATOMIC_OPS( or,  or,  i, .rl  , _release)
+ ATOMIC_OPS( or,  or,  i, .aqrl,         )

- ATOMIC_OPS(xor, xor, ^,  i,      , _relaxed)
- ATOMIC_OPS(xor, xor, ^,  i, .aq  , _acquire)
- ATOMIC_OPS(xor, xor, ^,  i, .rl  , _release)
- ATOMIC_OPS(xor, xor, ^,  i, .aqrl,         )
+ ATOMIC_OPS(xor, xor,  i,      , _relaxed)
+ ATOMIC_OPS(xor, xor,  i, .aq  , _acquire)
+ ATOMIC_OPS(xor, xor,  i, .rl  , _release)
+ ATOMIC_OPS(xor, xor,  i, .aqrl,         )

  #undef ATOMIC_OPS
···
  #undef ATOMIC_OP
  #undef ATOMIC_OPS

- #define ATOMIC_OP(op, func_op, c_op, I, c_type, prefix) \
+ #define ATOMIC_OP(op, func_op, I, c_type, prefix) \
  static __always_inline void atomic##prefix##_##op(atomic##prefix##_t *v) \
  { \
  	atomic##prefix##_##func_op(I, v); \
  }

- #define ATOMIC_FETCH_OP(op, func_op, c_op, I, c_type, prefix) \
+ #define ATOMIC_FETCH_OP(op, func_op, I, c_type, prefix) \
  static __always_inline c_type atomic##prefix##_fetch_##op(atomic##prefix##_t *v) \
  { \
  	return atomic##prefix##_fetch_##func_op(I, v); \
···
  #ifdef CONFIG_GENERIC_ATOMIC64
  #define ATOMIC_OPS(op, asm_op, c_op, I) \
-         ATOMIC_OP       (op, asm_op, c_op, I, int,   ) \
-         ATOMIC_FETCH_OP (op, asm_op, c_op, I, int,   ) \
+         ATOMIC_OP       (op, asm_op, I, int,   ) \
+         ATOMIC_FETCH_OP (op, asm_op, I, int,   ) \
          ATOMIC_OP_RETURN(op, asm_op, c_op, I, int,   )
  #else
  #define ATOMIC_OPS(op, asm_op, c_op, I) \
-         ATOMIC_OP       (op, asm_op, c_op, I, int,   ) \
-         ATOMIC_FETCH_OP (op, asm_op, c_op, I, int,   ) \
+         ATOMIC_OP       (op, asm_op, I, int,   ) \
+         ATOMIC_FETCH_OP (op, asm_op, I, int,   ) \
          ATOMIC_OP_RETURN(op, asm_op, c_op, I, int,   ) \
-         ATOMIC_OP       (op, asm_op, c_op, I, long, 64) \
-         ATOMIC_FETCH_OP (op, asm_op, c_op, I, long, 64) \
+         ATOMIC_OP       (op, asm_op, I, long, 64) \
+         ATOMIC_FETCH_OP (op, asm_op, I, long, 64) \
          ATOMIC_OP_RETURN(op, asm_op, c_op, I, long, 64)
  #endif
···
  /*
   * atomic_{cmp,}xchg is required to have exactly the same ordering semantics as
-  * {cmp,}xchg and the operations that return, so they need a barrier.  We just
-  * use the other implementations directly.
+  * {cmp,}xchg and the operations that return, so they need a barrier.
+  */
+ /*
+  * FIXME: atomic_cmpxchg_{acquire,release,relaxed} are all implemented by
+  * assigning the same barrier to both the LR and SC operations, but that might
+  * not make any sense.  We're waiting on a memory model specification to
+  * determine exactly what the right thing to do is here.
   */
  #define ATOMIC_OP(c_t, prefix, c_or, size, asm_or) \
  static __always_inline c_t atomic##prefix##_cmpxchg##c_or(atomic##prefix##_t *v, c_t o, c_t n) \
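To make the macro plumbing concrete, this is approximately what
ATOMIC_OP(add, add, i, w, int, ) expands to after the cleanup, with just
enough scaffolding added to stand alone (an editor's sketch, assuming a
RISC-V compiler). Note that the dropped c_op argument was never referenced
by the AMO-based expansion, which is why it could be removed mechanically:

typedef struct { int counter; } atomic_t;

#define __always_inline inline __attribute__((__always_inline__))

static __always_inline void atomic_add(int i, atomic_t *v)
{
        /* "amo" #asm_op "." #asm_type pastes into "amoadd.w". */
        __asm__ __volatile__ (
                "amoadd.w zero, %1, %0"
                : "+A" (v->counter)
                : "r" (i)
                : "memory");
}
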
-23
arch/riscv/include/asm/barrier.h
···
  #define smp_rmb()	RISCV_FENCE(r,r)
  #define smp_wmb()	RISCV_FENCE(w,w)

- /*
-  * These fences exist to enforce ordering around the relaxed AMOs.  The
-  * documentation defines that
-  * "
-  *     atomic_fetch_add();
-  *   is equivalent to:
-  *     smp_mb__before_atomic();
-  *     atomic_fetch_add_relaxed();
-  *     smp_mb__after_atomic();
-  * "
-  * So we emit full fences on both sides.
-  */
- #define __smb_mb__before_atomic()	smp_mb()
- #define __smb_mb__after_atomic()	smp_mb()
-
- /*
-  * These barriers prevent accesses performed outside a spinlock from being moved
-  * inside a spinlock.  Since RISC-V sets the aq/rl bits on our spinlock only
-  * enforce release consistency, we need full fences here.
-  */
- #define smb_mb__before_spinlock()	smp_mb()
- #define smb_mb__after_spinlock()	smp_mb()
-
  #include <asm-generic/barrier.h>

  #endif /* __ASSEMBLY__ */
+1 -1
arch/riscv/include/asm/bitops.h
··· 67 67 : "memory"); 68 68 69 69 #define __test_and_op_bit(op, mod, nr, addr) \ 70 - __test_and_op_bit_ord(op, mod, nr, addr, ) 70 + __test_and_op_bit_ord(op, mod, nr, addr, .aqrl) 71 71 #define __op_bit(op, mod, nr, addr) \ 72 72 __op_bit_ord(op, mod, nr, addr, ) 73 73
+3 -3
arch/riscv/include/asm/bug.h
···
  typedef u32 bug_insn_t;

  #ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
- #define __BUG_ENTRY_ADDR	INT " 1b - 2b"
- #define __BUG_ENTRY_FILE	INT " %0 - 2b"
+ #define __BUG_ENTRY_ADDR	RISCV_INT " 1b - 2b"
+ #define __BUG_ENTRY_FILE	RISCV_INT " %0 - 2b"
  #else
  #define __BUG_ENTRY_ADDR	RISCV_PTR " 1b"
  #define __BUG_ENTRY_FILE	RISCV_PTR " %0"
···
  #define __BUG_ENTRY			\
  	__BUG_ENTRY_ADDR "\n\t"		\
  	__BUG_ENTRY_FILE "\n\t"		\
- 	SHORT " %1"
+ 	RISCV_SHORT " %1"
  #else
  #define __BUG_ENTRY			\
  	__BUG_ENTRY_ADDR
+26 -4
arch/riscv/include/asm/cacheflush.h
···
  #undef flush_icache_range
  #undef flush_icache_user_range
+ #undef flush_dcache_page

  static inline void local_flush_icache_all(void)
  {
  	asm volatile ("fence.i" ::: "memory");
  }

+ #define PG_dcache_clean PG_arch_1
+
+ static inline void flush_dcache_page(struct page *page)
+ {
+ 	if (test_bit(PG_dcache_clean, &page->flags))
+ 		clear_bit(PG_dcache_clean, &page->flags);
+ }
+
+ /*
+  * RISC-V doesn't have an instruction to flush parts of the instruction cache,
+  * so instead we just flush the whole thing.
+  */
+ #define flush_icache_range(start, end) flush_icache_all()
+ #define flush_icache_user_range(vma, pg, addr, len) flush_icache_all()
+
  #ifndef CONFIG_SMP

- #define flush_icache_range(start, end) local_flush_icache_all()
- #define flush_icache_user_range(vma, pg, addr, len) local_flush_icache_all()
+ #define flush_icache_all() local_flush_icache_all()
+ #define flush_icache_mm(mm, local) flush_icache_all()

  #else /* CONFIG_SMP */

- #define flush_icache_range(start, end) sbi_remote_fence_i(0)
- #define flush_icache_user_range(vma, pg, addr, len) sbi_remote_fence_i(0)
+ #define flush_icache_all() sbi_remote_fence_i(0)
+ void flush_icache_mm(struct mm_struct *mm, bool local);

  #endif /* CONFIG_SMP */

+ /*
+  * Bits in sys_riscv_flush_icache()'s flags argument.
+  */
+ #define SYS_RISCV_FLUSH_ICACHE_LOCAL	1UL
+ #define SYS_RISCV_FLUSH_ICACHE_ALL	(SYS_RISCV_FLUSH_ICACHE_LOCAL)

  #endif /* _ASM_RISCV_CACHEFLUSH_H */
+10 -8
arch/riscv/include/asm/io.h
···
  #ifndef _ASM_RISCV_IO_H
  #define _ASM_RISCV_IO_H

+ #include <linux/types.h>
+
  #ifdef CONFIG_MMU

  extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
···
  #define ioremap_wc(addr, size) ioremap((addr), (size))
  #define ioremap_wt(addr, size) ioremap((addr), (size))

- extern void iounmap(void __iomem *addr);
+ extern void iounmap(volatile void __iomem *addr);

  #endif /* CONFIG_MMU */
···
  		const ctype *buf = buffer;			\
  								\
  		do {						\
- 			__raw_writeq(*buf++, addr);		\
+ 			__raw_write ## len(*buf++, addr);	\
  		} while (--count);				\
  	}							\
  	afence;							\
···
  __io_reads_ins(ins,  u8, b, __io_pbr(), __io_par())
  __io_reads_ins(ins, u16, w, __io_pbr(), __io_par())
  __io_reads_ins(ins, u32, l, __io_pbr(), __io_par())
- #define insb(addr, buffer, count) __insb((void __iomem *)addr, buffer, count)
- #define insw(addr, buffer, count) __insw((void __iomem *)addr, buffer, count)
- #define insl(addr, buffer, count) __insl((void __iomem *)addr, buffer, count)
+ #define insb(addr, buffer, count) __insb((void __iomem *)(long)addr, buffer, count)
+ #define insw(addr, buffer, count) __insw((void __iomem *)(long)addr, buffer, count)
+ #define insl(addr, buffer, count) __insl((void __iomem *)(long)addr, buffer, count)

  __io_writes_outs(writes, u8, b, __io_bw(), __io_aw())
  __io_writes_outs(writes, u16, w, __io_bw(), __io_aw())
···
  __io_writes_outs(outs,  u8, b, __io_pbw(), __io_paw())
  __io_writes_outs(outs, u16, w, __io_pbw(), __io_paw())
  __io_writes_outs(outs, u32, l, __io_pbw(), __io_paw())
- #define outsb(addr, buffer, count) __outsb((void __iomem *)addr, buffer, count)
- #define outsw(addr, buffer, count) __outsw((void __iomem *)addr, buffer, count)
- #define outsl(addr, buffer, count) __outsl((void __iomem *)addr, buffer, count)
+ #define outsb(addr, buffer, count) __outsb((void __iomem *)(long)addr, buffer, count)
+ #define outsw(addr, buffer, count) __outsw((void __iomem *)(long)addr, buffer, count)
+ #define outsl(addr, buffer, count) __outsl((void __iomem *)(long)addr, buffer, count)

  #ifdef CONFIG_64BIT
  __io_reads_ins(reads, u64, q, __io_br(), __io_ar())
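The __io_writes fix above is easy to miss: the loop body hard-coded
__raw_writeq(), so the byte and halfword writers stored 64 bits at a time.
A standalone sketch (an editor's illustration with stub accessors, not
kernel code) of how the pasted 'len' token selects the correct-width store:

#include <stdint.h>

static inline void __raw_writeb(uint8_t val, volatile void *addr)
{
        *(volatile uint8_t *)addr = val;
}

static inline void __raw_writew(uint16_t val, volatile void *addr)
{
        *(volatile uint16_t *)addr = val;
}

/* 'len' is pasted into the accessor name, so writesb() stores bytes
 * and writesw() stores halfwords; substituting __raw_writeq here
 * would ignore 'ctype' entirely, which was the bug. */
#define DEFINE_WRITES(len, ctype)                                     \
static void writes##len(volatile void *addr, const void *buffer,     \
                        unsigned int count)                           \
{                                                                     \
        const ctype *buf = buffer;                                    \
                                                                      \
        do {                                                          \
                __raw_write##len(*buf++, addr);                       \
        } while (--count);                                            \
}

DEFINE_WRITES(b, uint8_t)
DEFINE_WRITES(w, uint16_t)
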
+4
arch/riscv/include/asm/mmu.h
···
  typedef struct {
  	void *vdso;
+ #ifdef CONFIG_SMP
+ 	/* A local icache flush is needed before user execution can resume. */
+ 	cpumask_t icache_stale_mask;
+ #endif
  } mm_context_t;

  #endif /* __ASSEMBLY__ */
+45
arch/riscv/include/asm/mmu_context.h
···
  /*
   * Copyright (C) 2012 Regents of the University of California
+  * Copyright (C) 2017 SiFive
   *
   * This program is free software; you can redistribute it and/or
   * modify it under the terms of the GNU General Public License
···
  #ifndef _ASM_RISCV_MMU_CONTEXT_H
  #define _ASM_RISCV_MMU_CONTEXT_H

+ #include <linux/mm_types.h>
  #include <asm-generic/mm_hooks.h>

  #include <linux/mm.h>
  #include <linux/sched.h>
  #include <asm/tlbflush.h>
+ #include <asm/cacheflush.h>

  static inline void enter_lazy_tlb(struct mm_struct *mm,
  	struct task_struct *task)
···
  	csr_write(sptbr, virt_to_pfn(pgd) | SPTBR_MODE);
  }

+ /*
+  * When necessary, performs a deferred icache flush for the given MM context,
+  * on the local CPU.  RISC-V has no direct mechanism for instruction cache
+  * shoot downs, so instead we send an IPI that informs the remote harts they
+  * need to flush their local instruction caches.  To avoid pathologically slow
+  * behavior in a common case (a bunch of single-hart processes on a many-hart
+  * machine, ie 'make -j') we avoid the IPIs for harts that are not currently
+  * executing a MM context and instead schedule a deferred local instruction
+  * cache flush to be performed before execution resumes on each hart.  This
+  * actually performs that local instruction cache flush, which implicitly only
+  * refers to the current hart.
+  */
+ static inline void flush_icache_deferred(struct mm_struct *mm)
+ {
+ #ifdef CONFIG_SMP
+ 	unsigned int cpu = smp_processor_id();
+ 	cpumask_t *mask = &mm->context.icache_stale_mask;
+
+ 	if (cpumask_test_cpu(cpu, mask)) {
+ 		cpumask_clear_cpu(cpu, mask);
+ 		/*
+ 		 * Ensure the remote hart's writes are visible to this hart.
+ 		 * This pairs with a barrier in flush_icache_mm.
+ 		 */
+ 		smp_mb();
+ 		local_flush_icache_all();
+ 	}
+ #endif
+ }
+
  static inline void switch_mm(struct mm_struct *prev,
  	struct mm_struct *next, struct task_struct *task)
  {
  	if (likely(prev != next)) {
+ 		/*
+ 		 * Mark the current MM context as inactive, and the next as
+ 		 * active.  This is at least used by the icache flushing
+ 		 * routines in order to determine who should be flushed.
+ 		 */
+ 		unsigned int cpu = smp_processor_id();
+
+ 		cpumask_clear_cpu(cpu, mm_cpumask(prev));
+ 		cpumask_set_cpu(cpu, mm_cpumask(next));
+
  		set_pgdir(next->pgd);
  		local_flush_tlb_all();
+
+ 		flush_icache_deferred(next);
  	}
  }
+32 -26
arch/riscv/include/asm/pgtable.h
···
  #define pte_offset_map(dir, addr)	pte_offset_kernel((dir), (addr))
  #define pte_unmap(pte)		((void)(pte))

- /*
-  * Certain architectures need to do special things when PTEs within
-  * a page table are directly modified.  Thus, the following hook is
-  * made available.
-  */
- static inline void set_pte(pte_t *ptep, pte_t pteval)
- {
- 	*ptep = pteval;
- }
-
- static inline void set_pte_at(struct mm_struct *mm,
- 	unsigned long addr, pte_t *ptep, pte_t pteval)
- {
- 	set_pte(ptep, pteval);
- }
-
- static inline void pte_clear(struct mm_struct *mm,
- 	unsigned long addr, pte_t *ptep)
- {
- 	set_pte_at(mm, addr, ptep, __pte(0));
- }
-
  static inline int pte_present(pte_t pte)
  {
  	return (pte_val(pte) & _PAGE_PRESENT);
···
  	return (pte_val(pte) == 0);
  }

- /* static inline int pte_read(pte_t pte) */
-
  static inline int pte_write(pte_t pte)
  {
  	return pte_val(pte) & _PAGE_WRITE;
+ }
+
+ static inline int pte_exec(pte_t pte)
+ {
+ 	return pte_val(pte) & _PAGE_EXEC;
  }

  static inline int pte_huge(pte_t pte)
···
  	return pte_present(pte)
  		&& (pte_val(pte) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
  }
-
- /* static inline int pte_exec(pte_t pte) */

  static inline int pte_dirty(pte_t pte)
  {
···
  static inline int pte_same(pte_t pte_a, pte_t pte_b)
  {
  	return pte_val(pte_a) == pte_val(pte_b);
+ }
+
+ /*
+  * Certain architectures need to do special things when PTEs within
+  * a page table are directly modified.  Thus, the following hook is
+  * made available.
+  */
+ static inline void set_pte(pte_t *ptep, pte_t pteval)
+ {
+ 	*ptep = pteval;
+ }
+
+ void flush_icache_pte(pte_t pte);
+
+ static inline void set_pte_at(struct mm_struct *mm,
+ 	unsigned long addr, pte_t *ptep, pte_t pteval)
+ {
+ 	if (pte_present(pteval) && pte_exec(pteval))
+ 		flush_icache_pte(pteval);
+
+ 	set_pte(ptep, pteval);
+ }
+
+ static inline void pte_clear(struct mm_struct *mm,
+ 	unsigned long addr, pte_t *ptep)
+ {
+ 	set_pte_at(mm, addr, ptep, __pte(0));
  }

  #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+1 -10
arch/riscv/include/asm/spinlock.h
···
  /* FIXME: Replace this with a ticket lock, like MIPS. */

- #define arch_spin_is_locked(x)	((x)->lock != 0)
+ #define arch_spin_is_locked(x)	(READ_ONCE((x)->lock) != 0)

  static inline void arch_spin_unlock(arch_spinlock_t *lock)
  {
···
  		if (arch_spin_trylock(lock))
  			break;
  	}
  }
-
- static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
- {
- 	smp_rmb();
- 	do {
- 		cpu_relax();
- 	} while (arch_spin_is_locked(lock));
- 	smp_acquire__after_ctrl_dep();
- }

  /***********************************************************/
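A small user-space model (an editor's sketch, not kernel code) of the bug
class the new READ_ONCE() closes: without a volatile access the compiler
may load lock->lock once, hoist it out of a polling loop, and spin forever
on a stale register value.

typedef struct {
        unsigned int lock;
} arch_spinlock_t;

/* Forces a fresh, untorn load on every evaluation. */
#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

static inline int spin_is_locked(arch_spinlock_t *x)
{
        return READ_ONCE(x->lock) != 0;
}

/* Without the READ_ONCE above, a conforming compiler could compile
 * this as 'if (x->lock) for (;;);', since nothing in the loop tells
 * it the memory can change behind its back. */
static void spin_wait(arch_spinlock_t *x)
{
        while (spin_is_locked(x))
                ;
}
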
+2 -1
arch/riscv/include/asm/timex.h
···
  typedef unsigned long cycles_t;

- static inline cycles_t get_cycles(void)
+ static inline cycles_t get_cycles_inline(void)
  {
  	cycles_t n;

···
  		: "=r" (n));
  	return n;
  }
+ #define get_cycles get_cycles_inline

  #ifdef CONFIG_64BIT
  static inline uint64_t get_cycles64(void)
+6 -1
arch/riscv/include/asm/tlbflush.h
···
  #ifdef CONFIG_MMU

- /* Flush entire local TLB */
+ #include <linux/mm_types.h>
+
+ /*
+  * Flush entire local TLB.  'sfence.vma' implicitly fences with the instruction
+  * cache as well, so a 'fence.i' is not necessary.
+  */
  static inline void local_flush_tlb_all(void)
  {
  	__asm__ __volatile__ ("sfence.vma" : : : "memory");
+28
arch/riscv/include/asm/vdso-syscalls.h
···
+ /*
+  * Copyright (C) 2017 SiFive
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License version 2 as
+  * published by the Free Software Foundation.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  * GNU General Public License for more details.
+  *
+  * You should have received a copy of the GNU General Public License
+  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+  */
+
+ #ifndef _ASM_RISCV_VDSO_SYSCALLS_H
+ #define _ASM_RISCV_VDSO_SYSCALLS_H
+
+ #ifdef CONFIG_SMP
+
+ /* These syscalls are only used by the vDSO and are not in the uapi. */
+ #define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
+ __SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
+
+ #endif
+
+ #endif /* _ASM_RISCV_VDSO_SYSCALLS_H */
+4
arch/riscv/include/asm/vdso.h
···
  	(void __user *)((unsigned long)(base) + __vdso_##name); \
  })

+ #ifdef CONFIG_SMP
+ asmlinkage long sys_riscv_flush_icache(uintptr_t, uintptr_t, uintptr_t);
+ #endif
+
  #endif /* _ASM_RISCV_VDSO_H */
-3
arch/riscv/kernel/head.S
···
  __PAGE_ALIGNED_BSS
  	/* Empty zero page */
  	.balign PAGE_SIZE
- ENTRY(empty_zero_page)
- 	.fill (empty_zero_page + PAGE_SIZE) - ., 1, 0x00
- END(empty_zero_page)
+3
arch/riscv/kernel/riscv_ksyms.c
···
  /*
   * Assembly functions that may be used (directly or indirectly) by modules
   */
+ EXPORT_SYMBOL(__clear_user);
  EXPORT_SYMBOL(__copy_user);
+ EXPORT_SYMBOL(memset);
+ EXPORT_SYMBOL(memcpy);
+5
arch/riscv/kernel/setup.c
···
  #endif /* CONFIG_CMDLINE_BOOL */

  unsigned long va_pa_offset;
+ EXPORT_SYMBOL(va_pa_offset);
  unsigned long pfn_base;
+ EXPORT_SYMBOL(pfn_base);
+
+ unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
+ EXPORT_SYMBOL(empty_zero_page);

  /* The lucky hart to first increment this variable will boot the other cores */
  atomic_t hart_lottery;
+55
arch/riscv/kernel/smp.c
···
  	IPI_MAX
  };

+
+ /* Unsupported */
+ int setup_profiling_timer(unsigned int multiplier)
+ {
+ 	return -EINVAL;
+ }
+
  irqreturn_t handle_ipi(void)
  {
  	unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
···
  void smp_send_reschedule(int cpu)
  {
  	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
+ }
+
+ /*
+  * Performs an icache flush for the given MM context.  RISC-V has no direct
+  * mechanism for instruction cache shoot downs, so instead we send an IPI that
+  * informs the remote harts they need to flush their local instruction caches.
+  * To avoid pathologically slow behavior in a common case (a bunch of
+  * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
+  * IPIs for harts that are not currently executing a MM context and instead
+  * schedule a deferred local instruction cache flush to be performed before
+  * execution resumes on each hart.
+  */
+ void flush_icache_mm(struct mm_struct *mm, bool local)
+ {
+ 	unsigned int cpu;
+ 	cpumask_t others, *mask;
+
+ 	preempt_disable();
+
+ 	/* Mark every hart's icache as needing a flush for this MM. */
+ 	mask = &mm->context.icache_stale_mask;
+ 	cpumask_setall(mask);
+ 	/* Flush this hart's I$ now, and mark it as flushed. */
+ 	cpu = smp_processor_id();
+ 	cpumask_clear_cpu(cpu, mask);
+ 	local_flush_icache_all();
+
+ 	/*
+ 	 * Flush the I$ of other harts concurrently executing, and mark them as
+ 	 * flushed.
+ 	 */
+ 	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
+ 	local |= cpumask_empty(&others);
+ 	if (mm != current->active_mm || !local)
+ 		sbi_remote_fence_i(others.bits);
+ 	else {
+ 		/*
+ 		 * It's assumed that at least one strongly ordered operation is
+ 		 * performed on this hart between setting a hart's cpumask bit
+ 		 * and scheduling this MM context on that hart.  Sending an SBI
+ 		 * remote message will do this, but in the case where no
+ 		 * messages are sent we still need to order this hart's writes
+ 		 * with flush_icache_deferred().
+ 		 */
+ 		smp_mb();
+ 	}
+
+ 	preempt_enable();
  }
+32 -1
arch/riscv/kernel/sys_riscv.c
···
   */

  #include <linux/syscalls.h>
- #include <asm/cmpxchg.h>
  #include <asm/unistd.h>
+ #include <asm/cacheflush.h>

  static long riscv_sys_mmap(unsigned long addr, unsigned long len,
  	unsigned long prot, unsigned long flags,
···
  	return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 12);
  }
  #endif /* !CONFIG_64BIT */
+
+ #ifdef CONFIG_SMP
+ /*
+  * Allows the instruction cache to be flushed from userspace.  Despite RISC-V
+  * having a direct 'fence.i' instruction available to userspace (which we
+  * can't trap!), that's not actually viable when running on Linux because the
+  * kernel might schedule a process on another hart.  There is no way for
+  * userspace to handle this without invoking the kernel (as it doesn't know
+  * the thread->hart mappings), so we've defined a RISC-V specific system call
+  * to flush the instruction cache.
+  *
+  * sys_riscv_flush_icache() is defined to flush the instruction cache over an
+  * address range, with the flush applying to either all threads or just the
+  * caller.  We don't currently do anything with the address range, that's just
+  * in there for forwards compatibility.
+  */
+ SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end,
+ 	uintptr_t, flags)
+ {
+ 	struct mm_struct *mm = current->mm;
+ 	bool local = (flags & SYS_RISCV_FLUSH_ICACHE_LOCAL) != 0;
+
+ 	/* Reject any reserved flag bits. */
+ 	if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL))
+ 		return -EINVAL;
+
+ 	flush_icache_mm(mm, local);
+
+ 	return 0;
+ }
+ #endif
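For completeness, a hypothetical user-space invocation of the new syscall
(an editor's sketch: the number below mirrors the private definition in
asm/vdso-syscalls.h and assumes the asm-generic value of 244 for
__NR_arch_specific_syscall; it is deliberately not uapi, which is why the
__vdso_flush_icache entry is the supported interface):

#include <unistd.h>
#include <sys/syscall.h>

/* Assumption: __NR_arch_specific_syscall (244) + 15, mirroring
 * asm/vdso-syscalls.h.  This constant is private to the vDSO and may
 * change; real programs should call the vDSO entry instead. */
#define NR_riscv_flush_icache (244 + 15)

/* Typical JIT flow: write instructions into a buffer, flush the
 * icache over that range, then jump to the buffer. */
static long flush_icache(void *start, void *end, unsigned long flags)
{
        return syscall(NR_riscv_flush_icache, start, end, flags);
}
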
+2
arch/riscv/kernel/syscall_table.c
···
  #include <linux/linkage.h>
  #include <linux/syscalls.h>
  #include <asm-generic/syscalls.h>
+ #include <asm/vdso.h>

  #undef __SYSCALL
  #define __SYSCALL(nr, call)	[nr] = (call),
···
  void *sys_call_table[__NR_syscalls] = {
  	[0 ... __NR_syscalls - 1] = sys_ni_syscall,
  #include <asm/unistd.h>
+ #include <asm/vdso-syscalls.h>
  };
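The table construction above leans on two GNU C features: range
designators and last-writer-wins designated initializers. A tiny
standalone model (an editor's illustration, not kernel code):

#include <stdio.h>

typedef long (*syscall_fn)(void);

static long sys_ni_syscall(void) { return -38; /* -ENOSYS */ }
static long sys_hello(void)      { return 42; }

#define NR_HELLO    3
#define NR_SYSCALLS 8

/* Every slot defaults to sys_ni_syscall via the (GNU extension)
 * range designator; later designators, standing in for the
 * __SYSCALL() expansions pulled in by the #includes, override
 * individual entries. */
static syscall_fn table[NR_SYSCALLS] = {
        [0 ... NR_SYSCALLS - 1] = sys_ni_syscall,
        [NR_HELLO] = sys_hello,
};

int main(void)
{
        printf("%ld %ld\n", table[0](), table[NR_HELLO]());
        return 0;
}
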
+6 -1
arch/riscv/kernel/vdso/Makefile
···
  # Copied from arch/tile/kernel/vdso/Makefile

  # Symbols present in the vdso
- vdso-syms = rt_sigreturn
+ vdso-syms  = rt_sigreturn
+ vdso-syms += gettimeofday
+ vdso-syms += clock_gettime
+ vdso-syms += clock_getres
+ vdso-syms += getcpu
+ vdso-syms += flush_icache

  # Files to link into the vdso
  obj-vdso = $(patsubst %, %.o, $(vdso-syms))
+26
arch/riscv/kernel/vdso/clock_getres.S
···
+ /*
+  * Copyright (C) 2017 SiFive
+  *
+  * This program is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU General Public License
+  * as published by the Free Software Foundation, version 2.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  * GNU General Public License for more details.
+  */
+
+ #include <linux/linkage.h>
+ #include <asm/unistd.h>
+
+ 	.text
+ /* int __vdso_clock_getres(clockid_t clock_id, struct timespec *res); */
+ ENTRY(__vdso_clock_getres)
+ 	.cfi_startproc
+ 	/* For now, just do the syscall. */
+ 	li a7, __NR_clock_getres
+ 	ecall
+ 	ret
+ 	.cfi_endproc
+ ENDPROC(__vdso_clock_getres)
+26
arch/riscv/kernel/vdso/clock_gettime.S
···
+ /*
+  * Copyright (C) 2017 SiFive
+  *
+  * This program is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU General Public License
+  * as published by the Free Software Foundation, version 2.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  * GNU General Public License for more details.
+  */
+
+ #include <linux/linkage.h>
+ #include <asm/unistd.h>
+
+ 	.text
+ /* int __vdso_clock_gettime(clockid_t clock_id, struct timespec *tp); */
+ ENTRY(__vdso_clock_gettime)
+ 	.cfi_startproc
+ 	/* For now, just do the syscall. */
+ 	li a7, __NR_clock_gettime
+ 	ecall
+ 	ret
+ 	.cfi_endproc
+ ENDPROC(__vdso_clock_gettime)
+31
arch/riscv/kernel/vdso/flush_icache.S
···
+ /*
+  * Copyright (C) 2017 SiFive
+  *
+  * This program is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU General Public License
+  * as published by the Free Software Foundation, version 2.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  * GNU General Public License for more details.
+  */
+
+ #include <linux/linkage.h>
+ #include <asm/unistd.h>
+ #include <asm/vdso-syscalls.h>
+
+ 	.text
+ /* int __vdso_flush_icache(void *start, void *end, unsigned long flags); */
+ ENTRY(__vdso_flush_icache)
+ 	.cfi_startproc
+ #ifdef CONFIG_SMP
+ 	li a7, __NR_riscv_flush_icache
+ 	ecall
+ #else
+ 	fence.i
+ 	li a0, 0
+ #endif
+ 	ret
+ 	.cfi_endproc
+ ENDPROC(__vdso_flush_icache)
+26
arch/riscv/kernel/vdso/getcpu.S
···
+ /*
+  * Copyright (C) 2017 SiFive
+  *
+  * This program is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU General Public License
+  * as published by the Free Software Foundation, version 2.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  * GNU General Public License for more details.
+  */
+
+ #include <linux/linkage.h>
+ #include <asm/unistd.h>
+
+ 	.text
+ /* int __vdso_getcpu(unsigned *cpu, unsigned *node, void *unused); */
+ ENTRY(__vdso_getcpu)
+ 	.cfi_startproc
+ 	/* For now, just do the syscall. */
+ 	li a7, __NR_getcpu
+ 	ecall
+ 	ret
+ 	.cfi_endproc
+ ENDPROC(__vdso_getcpu)
+26
arch/riscv/kernel/vdso/gettimeofday.S
···
+ /*
+  * Copyright (C) 2017 SiFive
+  *
+  * This program is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU General Public License
+  * as published by the Free Software Foundation, version 2.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  * GNU General Public License for more details.
+  */
+
+ #include <linux/linkage.h>
+ #include <asm/unistd.h>
+
+ 	.text
+ /* int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz); */
+ ENTRY(__vdso_gettimeofday)
+ 	.cfi_startproc
+ 	/* For now, just do the syscall. */
+ 	li a7, __NR_gettimeofday
+ 	ecall
+ 	ret
+ 	.cfi_endproc
+ ENDPROC(__vdso_gettimeofday)
+5 -2
arch/riscv/kernel/vdso/vdso.lds.S
···
  	LINUX_4.15 {
  	global:
  		__vdso_rt_sigreturn;
- 		__vdso_cmpxchg32;
- 		__vdso_cmpxchg64;
+ 		__vdso_gettimeofday;
+ 		__vdso_clock_gettime;
+ 		__vdso_clock_getres;
+ 		__vdso_getcpu;
+ 		__vdso_flush_icache;
  	local: *;
  	};
  }
+1
arch/riscv/lib/delay.c
···
  	while ((unsigned long)(get_cycles() - t0) < cycles)
  		cpu_relax();
  }
+ EXPORT_SYMBOL(__delay);

  void udelay(unsigned long usecs)
  {
+1
arch/riscv/mm/Makefile
···
  obj-y += fault.o
  obj-y += extable.o
  obj-y += ioremap.o
+ obj-y += cacheflush.o
+23
arch/riscv/mm/cacheflush.c
···
+ /*
+  * Copyright (C) 2017 SiFive
+  *
+  * This program is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU General Public License
+  * as published by the Free Software Foundation, version 2.
+  *
+  * This program is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  * GNU General Public License for more details.
+  */
+
+ #include <asm/pgtable.h>
+ #include <asm/cacheflush.h>
+
+ void flush_icache_pte(pte_t pte)
+ {
+ 	struct page *page = pte_page(pte);
+
+ 	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+ 		flush_icache_all();
+ }
+1 -1
arch/riscv/mm/ioremap.c
···
   *
   * Caller must ensure there is only one unmapping for the same pointer.
   */
- void iounmap(void __iomem *addr)
+ void iounmap(volatile void __iomem *addr)
  {
  	vunmap((void *)((unsigned long)addr & PAGE_MASK));
  }
include/lib/libgcc.h → include/linux/libgcc.h (renamed)
+1 -1
lib/ashldi3.c
···
  #include <linux/export.h>

- #include <lib/libgcc.h>
+ #include <linux/libgcc.h>

  long long notrace __ashldi3(long long u, word_type b)
  {
+1 -1
lib/ashrdi3.c
···
  #include <linux/export.h>

- #include <lib/libgcc.h>
+ #include <linux/libgcc.h>

  long long notrace __ashrdi3(long long u, word_type b)
  {
+1 -1
lib/cmpdi2.c
···
  #include <linux/export.h>

- #include <lib/libgcc.h>
+ #include <linux/libgcc.h>

  word_type notrace __cmpdi2(long long a, long long b)
  {
+1 -1
lib/lshrdi3.c
···
   */

  #include <linux/module.h>
- #include <lib/libgcc.h>
+ #include <linux/libgcc.h>

  long long notrace __lshrdi3(long long u, word_type b)
  {
+1 -1
lib/muldi3.c
···
   */

  #include <linux/export.h>
- #include <lib/libgcc.h>
+ #include <linux/libgcc.h>

  #define W_TYPE_SIZE 32
+1 -1
lib/ucmpdi2.c
···
   */

  #include <linux/module.h>
- #include <lib/libgcc.h>
+ #include <linux/libgcc.h>

  word_type __ucmpdi2(unsigned long long a, unsigned long long b)
  {