Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'csky-for-linus-5.6-rc3' of git://github.com/c-sky/csky-linux

Pull csky updates from Guo Ren:
"Sorry, I missed the 5.6-rc1 merge window, but in this pull request
most of the changes are fixes and the rest are between fixes and
features. The only outside modification is the MAINTAINERS file update
with our mailing list.

- cache flush implementation fixes

- ftrace modify panic fix

- CONFIG_SMP boot problem fix

- fix pt_regs saving for atomic.S

- fix fixaddr_init without highmem

- fix stack protector support

- fix fake Tightly-Coupled Memory code compile and use

- fix some typos and coding convention"

* tag 'csky-for-linus-5.6-rc3' of git://github.com/c-sky/csky-linux: (23 commits)
csky: Replace <linux/clk-provider.h> by <linux/of_clk.h>
csky: Implement copy_thread_tls
csky: Add PCI support
csky: Minimize defconfig to support buildroot config.fragment
csky: Add setup_initrd check code
csky: Cleanup old Kconfig options
arch/csky: fix some Kconfig typos
csky: Fixup compile warning for three unimplemented syscalls
csky: Remove unused cache implementation
csky: Fixup ftrace modify panic
csky: Add flush_icache_mm to defer flush icache all
csky: Optimize abiv2 copy_to_user_page with VM_EXEC
csky: Enable defer flush_dcache_page for abiv2 cpus (807/810/860)
csky: Remove unnecessary flush_icache_* implementation
csky: Support icache flush without specific instructions
csky/Kconfig: Add Kconfig.platforms to support some drivers
csky/smp: Fixup boot failed when CONFIG_SMP
csky: Set regs->usp to kernel sp, when the exception is from kernel
csky/mm: Fixup export invalid_pte_table symbol
csky: Separate fixaddr_init from highmem
...

+668 -169
+1
MAINTAINERS
··· 3649 3649 3650 3650 C-SKY ARCHITECTURE 3651 3651 M: Guo Ren <guoren@kernel.org> 3652 + L: linux-csky@vger.kernel.org 3652 3653 T: git https://github.com/c-sky/csky-linux.git 3653 3654 S: Supported 3654 3655 F: arch/csky/
+48 -3
arch/csky/Kconfig
··· 9 9 select ARCH_USE_QUEUED_RWLOCKS if NR_CPUS>2 10 10 select COMMON_CLK 11 11 select CLKSRC_MMIO 12 - select CLKSRC_OF 13 12 select CSKY_MPINTC if CPU_CK860 14 13 select CSKY_MP_TIMER if CPU_CK860 15 14 select CSKY_APB_INTC ··· 36 37 select GX6605S_TIMER if CPU_CK610 37 38 select HAVE_ARCH_TRACEHOOK 38 39 select HAVE_ARCH_AUDITSYSCALL 40 + select HAVE_COPY_THREAD_TLS 39 41 select HAVE_DYNAMIC_FTRACE 40 42 select HAVE_FUNCTION_TRACER 41 43 select HAVE_FUNCTION_GRAPH_TRACER ··· 47 47 select HAVE_PERF_EVENTS 48 48 select HAVE_PERF_REGS 49 49 select HAVE_PERF_USER_STACK_DUMP 50 - select HAVE_DMA_API_DEBUG 51 50 select HAVE_DMA_CONTIGUOUS 51 + select HAVE_STACKPROTECTOR 52 52 select HAVE_SYSCALL_TRACEPOINTS 53 53 select MAY_HAVE_SPARSE_IRQ 54 54 select MODULES_USE_ELF_RELA if MODULES ··· 59 59 select TIMER_OF 60 60 select USB_ARCH_HAS_EHCI 61 61 select USB_ARCH_HAS_OHCI 62 + select GENERIC_PCI_IOMAP 63 + select HAVE_PCI 64 + select PCI_DOMAINS_GENERIC if PCI 65 + select PCI_SYSCALL if PCI 66 + select PCI_MSI if PCI 62 67 63 68 config CPU_HAS_CACHEV2 64 69 bool ··· 80 75 config CPU_HAS_LDSTEX 81 76 bool 82 77 help 83 - For SMP, CPU needs "ldex&stex" instrcutions to atomic operations. 78 + For SMP, CPU needs "ldex&stex" instructions for atomic operations. 84 79 85 80 config CPU_NEED_TLBSYNC 86 81 bool ··· 193 188 bool "stop" 194 189 endchoice 195 190 191 + menuconfig HAVE_TCM 192 + bool "Tightly-Coupled/Sram Memory" 193 + select GENERIC_ALLOCATOR 194 + help 195 + The implementation are not only used by TCM (Tightly-Coupled Meory) 196 + but also used by sram on SOC bus. It follow existed linux tcm 197 + software interface, so that old tcm application codes could be 198 + re-used directly. 
199 + 200 + if HAVE_TCM 201 + config ITCM_RAM_BASE 202 + hex "ITCM ram base" 203 + default 0xffffffff 204 + 205 + config ITCM_NR_PAGES 206 + int "Page count of ITCM size: NR*4KB" 207 + range 1 256 208 + default 32 209 + 210 + config HAVE_DTCM 211 + bool "DTCM Support" 212 + 213 + config DTCM_RAM_BASE 214 + hex "DTCM ram base" 215 + depends on HAVE_DTCM 216 + default 0xffffffff 217 + 218 + config DTCM_NR_PAGES 219 + int "Page count of DTCM size: NR*4KB" 220 + depends on HAVE_DTCM 221 + range 1 256 222 + default 32 223 + endif 224 + 196 225 config CPU_HAS_VDSP 197 226 bool "CPU has VDSP coprocessor" 198 227 depends on CPU_HAS_FPU && CPU_HAS_FPUV2 ··· 234 195 config CPU_HAS_FPU 235 196 bool "CPU has FPU coprocessor" 236 197 depends on CPU_CK807 || CPU_CK810 || CPU_CK860 198 + 199 + config CPU_HAS_ICACHE_INS 200 + bool "CPU has Icache invalidate instructions" 201 + depends on CPU_HAS_CACHEV2 237 202 238 203 config CPU_HAS_TEE 239 204 bool "CPU has Trusted Execution Environment" ··· 277 234 278 235 Say N if you want to disable CPU hotplug. 279 236 endmenu 237 + 238 + source "arch/csky/Kconfig.platforms" 280 239 281 240 source "kernel/Kconfig.hz"
+9
arch/csky/Kconfig.platforms
··· 1 + menu "Platform drivers selection" 2 + 3 + config ARCH_CSKY_DW_APB_ICTL 4 + bool "Select dw-apb interrupt controller" 5 + select DW_APB_ICTL 6 + default y 7 + help 8 + This enables support for snps dw-apb-ictl 9 + endmenu
+2 -3
arch/csky/abiv1/inc/abi/cacheflush.h
··· 48 48 49 49 #define flush_icache_page(vma, page) do {} while (0); 50 50 #define flush_icache_range(start, end) cache_wbinv_range(start, end) 51 - 52 - #define flush_icache_user_range(vma,page,addr,len) \ 53 - flush_dcache_page(page) 51 + #define flush_icache_mm_range(mm, start, end) cache_wbinv_range(start, end) 52 + #define flush_icache_deferred(mm) do {} while (0); 54 53 55 54 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ 56 55 do { \
+14 -5
arch/csky/abiv1/inc/abi/entry.h
··· 16 16 #define LSAVE_A4 40 17 17 #define LSAVE_A5 44 18 18 19 + #define usp ss1 20 + 19 21 .macro USPTOKSP 20 - mtcr sp, ss1 22 + mtcr sp, usp 21 23 mfcr sp, ss0 22 24 .endm 23 25 24 26 .macro KSPTOUSP 25 27 mtcr sp, ss0 26 - mfcr sp, ss1 28 + mfcr sp, usp 27 29 .endm 28 30 29 31 .macro SAVE_ALL epc_inc ··· 47 45 add lr, r13 48 46 stw lr, (sp, 8) 49 47 48 + mov lr, sp 49 + addi lr, 32 50 + addi lr, 32 51 + addi lr, 16 52 + bt 2f 50 53 mfcr lr, ss1 54 + 2: 51 55 stw lr, (sp, 16) 52 56 53 57 stw a0, (sp, 20) ··· 87 79 ldw a0, (sp, 12) 88 80 mtcr a0, epsr 89 81 btsti a0, 31 82 + bt 1f 90 83 ldw a0, (sp, 16) 91 84 mtcr a0, ss1 92 - 85 + 1: 93 86 ldw a0, (sp, 24) 94 87 ldw a1, (sp, 28) 95 88 ldw a2, (sp, 32) ··· 111 102 addi sp, 32 112 103 addi sp, 8 113 104 114 - bt 1f 105 + bt 2f 115 106 KSPTOUSP 116 - 1: 107 + 2: 117 108 rte 118 109 .endm 119 110
+63 -29
arch/csky/abiv2/cacheflush.c
··· 6 6 #include <linux/mm.h> 7 7 #include <asm/cache.h> 8 8 9 - void flush_icache_page(struct vm_area_struct *vma, struct page *page) 10 - { 11 - unsigned long start; 12 - 13 - start = (unsigned long) kmap_atomic(page); 14 - 15 - cache_wbinv_range(start, start + PAGE_SIZE); 16 - 17 - kunmap_atomic((void *)start); 18 - } 19 - 20 - void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, 21 - unsigned long vaddr, int len) 22 - { 23 - unsigned long kaddr; 24 - 25 - kaddr = (unsigned long) kmap_atomic(page) + (vaddr & ~PAGE_MASK); 26 - 27 - cache_wbinv_range(kaddr, kaddr + len); 28 - 29 - kunmap_atomic((void *)kaddr); 30 - } 31 - 32 9 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, 33 10 pte_t *pte) 34 11 { 35 - unsigned long addr, pfn; 12 + unsigned long addr; 36 13 struct page *page; 37 14 38 - pfn = pte_pfn(*pte); 39 - if (unlikely(!pfn_valid(pfn))) 15 + page = pfn_to_page(pte_pfn(*pte)); 16 + if (page == ZERO_PAGE(0)) 40 17 return; 41 18 42 - page = pfn_to_page(pfn); 43 - if (page == ZERO_PAGE(0)) 19 + if (test_and_set_bit(PG_dcache_clean, &page->flags)) 44 20 return; 45 21 46 22 addr = (unsigned long) kmap_atomic(page); 47 23 48 - cache_wbinv_range(addr, addr + PAGE_SIZE); 24 + dcache_wb_range(addr, addr + PAGE_SIZE); 25 + 26 + if (vma->vm_flags & VM_EXEC) 27 + icache_inv_range(addr, addr + PAGE_SIZE); 49 28 50 29 kunmap_atomic((void *) addr); 30 + } 31 + 32 + void flush_icache_deferred(struct mm_struct *mm) 33 + { 34 + unsigned int cpu = smp_processor_id(); 35 + cpumask_t *mask = &mm->context.icache_stale_mask; 36 + 37 + if (cpumask_test_cpu(cpu, mask)) { 38 + cpumask_clear_cpu(cpu, mask); 39 + /* 40 + * Ensure the remote hart's writes are visible to this hart. 41 + * This pairs with a barrier in flush_icache_mm. 
42 + */ 43 + smp_mb(); 44 + local_icache_inv_all(NULL); 45 + } 46 + } 47 + 48 + void flush_icache_mm_range(struct mm_struct *mm, 49 + unsigned long start, unsigned long end) 50 + { 51 + unsigned int cpu; 52 + cpumask_t others, *mask; 53 + 54 + preempt_disable(); 55 + 56 + #ifdef CONFIG_CPU_HAS_ICACHE_INS 57 + if (mm == current->mm) { 58 + icache_inv_range(start, end); 59 + preempt_enable(); 60 + return; 61 + } 62 + #endif 63 + 64 + /* Mark every hart's icache as needing a flush for this MM. */ 65 + mask = &mm->context.icache_stale_mask; 66 + cpumask_setall(mask); 67 + 68 + /* Flush this hart's I$ now, and mark it as flushed. */ 69 + cpu = smp_processor_id(); 70 + cpumask_clear_cpu(cpu, mask); 71 + local_icache_inv_all(NULL); 72 + 73 + /* 74 + * Flush the I$ of other harts concurrently executing, and mark them as 75 + * flushed. 76 + */ 77 + cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu)); 78 + 79 + if (mm != current->active_mm || !cpumask_empty(&others)) { 80 + on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1); 81 + cpumask_clear(mask); 82 + } 83 + 84 + preempt_enable(); 51 85 }
+22 -13
arch/csky/abiv2/inc/abi/cacheflush.h
··· 13 13 #define flush_cache_all() do { } while (0) 14 14 #define flush_cache_mm(mm) do { } while (0) 15 15 #define flush_cache_dup_mm(mm) do { } while (0) 16 - 17 - #define flush_cache_range(vma, start, end) \ 18 - do { \ 19 - if (vma->vm_flags & VM_EXEC) \ 20 - icache_inv_all(); \ 21 - } while (0) 22 - 16 + #define flush_cache_range(vma, start, end) do { } while (0) 23 17 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) 24 - #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 25 - #define flush_dcache_page(page) do { } while (0) 18 + 19 + #define PG_dcache_clean PG_arch_1 20 + 21 + #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 22 + static inline void flush_dcache_page(struct page *page) 23 + { 24 + if (test_bit(PG_dcache_clean, &page->flags)) 25 + clear_bit(PG_dcache_clean, &page->flags); 26 + } 27 + 26 28 #define flush_dcache_mmap_lock(mapping) do { } while (0) 27 29 #define flush_dcache_mmap_unlock(mapping) do { } while (0) 30 + #define flush_icache_page(vma, page) do { } while (0) 28 31 29 32 #define flush_icache_range(start, end) cache_wbinv_range(start, end) 30 33 31 - void flush_icache_page(struct vm_area_struct *vma, struct page *page); 32 - void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, 33 - unsigned long vaddr, int len); 34 + void flush_icache_mm_range(struct mm_struct *mm, 35 + unsigned long start, unsigned long end); 36 + void flush_icache_deferred(struct mm_struct *mm); 34 37 35 38 #define flush_cache_vmap(start, end) do { } while (0) 36 39 #define flush_cache_vunmap(start, end) do { } while (0) ··· 41 38 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ 42 39 do { \ 43 40 memcpy(dst, src, len); \ 44 - cache_wbinv_range((unsigned long)dst, (unsigned long)dst + len); \ 41 + if (vma->vm_flags & VM_EXEC) { \ 42 + dcache_wb_range((unsigned long)dst, \ 43 + (unsigned long)dst + len); \ 44 + flush_icache_mm_range(current->mm, \ 45 + (unsigned long)dst, \ 46 + (unsigned long)dst + len); \ 47 + } \ 45 48 } while (0) 
46 49 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ 47 50 memcpy(dst, src, len)
+11
arch/csky/abiv2/inc/abi/entry.h
··· 31 31 32 32 mfcr lr, epsr 33 33 stw lr, (sp, 12) 34 + btsti lr, 31 35 + bf 1f 36 + addi lr, sp, 152 37 + br 2f 38 + 1: 34 39 mfcr lr, usp 40 + 2: 35 41 stw lr, (sp, 16) 36 42 37 43 stw a0, (sp, 20) ··· 70 64 mtcr a0, epc 71 65 ldw a0, (sp, 12) 72 66 mtcr a0, epsr 67 + btsti a0, 31 73 68 ldw a0, (sp, 16) 74 69 mtcr a0, usp 70 + mtcr a0, ss0 75 71 76 72 #ifdef CONFIG_CPU_HAS_HILO 77 73 ldw a0, (sp, 140) ··· 94 86 addi sp, 40 95 87 ldm r16-r30, (sp) 96 88 addi sp, 72 89 + bf 1f 90 + mfcr sp, ss0 91 + 1: 97 92 rte 98 93 .endm 99 94
-8
arch/csky/configs/defconfig
··· 10 10 CONFIG_BSD_PROCESS_ACCT_V3=y 11 11 CONFIG_MODULES=y 12 12 CONFIG_MODULE_UNLOAD=y 13 - CONFIG_DEFAULT_DEADLINE=y 14 - CONFIG_CPU_CK807=y 15 - CONFIG_CPU_HAS_FPU=y 16 13 CONFIG_NET=y 17 14 CONFIG_PACKET=y 18 15 CONFIG_UNIX=y ··· 24 27 CONFIG_SERIAL_8250=y 25 28 CONFIG_SERIAL_8250_CONSOLE=y 26 29 CONFIG_SERIAL_OF_PLATFORM=y 27 - CONFIG_TTY_PRINTK=y 28 30 # CONFIG_VGA_CONSOLE is not set 29 - CONFIG_CSKY_MPTIMER=y 30 - CONFIG_GX6605S_TIMER=y 31 31 CONFIG_PM_DEVFREQ=y 32 32 CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y 33 33 CONFIG_DEVFREQ_GOV_PERFORMANCE=y ··· 50 56 CONFIG_ROMFS_FS=y 51 57 CONFIG_NFS_FS=y 52 58 CONFIG_PRINTK_TIME=y 53 - CONFIG_DEBUG_INFO=y 54 - CONFIG_DEBUG_FS=y 55 59 CONFIG_MAGIC_SYSRQ=y
-1
arch/csky/include/asm/Kbuild
··· 28 28 generic-y += mm-arch-hooks.h 29 29 generic-y += mmiowb.h 30 30 generic-y += module.h 31 - generic-y += pci.h 32 31 generic-y += percpu.h 33 32 generic-y += preempt.h 34 33 generic-y += qrwlock.h
+1
arch/csky/include/asm/cache.h
··· 16 16 17 17 void icache_inv_range(unsigned long start, unsigned long end); 18 18 void icache_inv_all(void); 19 + void local_icache_inv_all(void *priv); 19 20 20 21 void dcache_wb_range(unsigned long start, unsigned long end); 21 22 void dcache_wbinv_all(void);
+1
arch/csky/include/asm/cacheflush.h
··· 4 4 #ifndef __ASM_CSKY_CACHEFLUSH_H 5 5 #define __ASM_CSKY_CACHEFLUSH_H 6 6 7 + #include <linux/mm.h> 7 8 #include <abi/cacheflush.h> 8 9 9 10 #endif /* __ASM_CSKY_CACHEFLUSH_H */
+8 -1
arch/csky/include/asm/fixmap.h
··· 5 5 #define __ASM_CSKY_FIXMAP_H 6 6 7 7 #include <asm/page.h> 8 + #include <asm/memory.h> 8 9 #ifdef CONFIG_HIGHMEM 9 10 #include <linux/threads.h> 10 11 #include <asm/kmap_types.h> 11 12 #endif 12 13 13 14 enum fixed_addresses { 15 + #ifdef CONFIG_HAVE_TCM 16 + FIX_TCM = TCM_NR_PAGES, 17 + #endif 14 18 #ifdef CONFIG_HIGHMEM 15 19 FIX_KMAP_BEGIN, 16 20 FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1, ··· 22 18 __end_of_fixed_addresses 23 19 }; 24 20 25 - #define FIXADDR_TOP 0xffffc000 26 21 #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) 27 22 #define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) 28 23 29 24 #include <asm-generic/fixmap.h> 25 + 26 + extern void fixrange_init(unsigned long start, unsigned long end, 27 + pgd_t *pgd_base); 28 + extern void __init fixaddr_init(void); 30 29 31 30 #endif /* __ASM_CSKY_FIXMAP_H */
+25
arch/csky/include/asm/memory.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #ifndef __ASM_CSKY_MEMORY_H 4 + #define __ASM_CSKY_MEMORY_H 5 + 6 + #include <linux/compiler.h> 7 + #include <linux/const.h> 8 + #include <linux/types.h> 9 + #include <linux/sizes.h> 10 + 11 + #define FIXADDR_TOP _AC(0xffffc000, UL) 12 + #define PKMAP_BASE _AC(0xff800000, UL) 13 + #define VMALLOC_START _AC(0xc0008000, UL) 14 + #define VMALLOC_END (PKMAP_BASE - (PAGE_SIZE * 2)) 15 + 16 + #ifdef CONFIG_HAVE_TCM 17 + #ifdef CONFIG_HAVE_DTCM 18 + #define TCM_NR_PAGES (CONFIG_ITCM_NR_PAGES + CONFIG_DTCM_NR_PAGES) 19 + #else 20 + #define TCM_NR_PAGES (CONFIG_ITCM_NR_PAGES) 21 + #endif 22 + #define FIXADDR_TCM _AC(FIXADDR_TOP - (TCM_NR_PAGES * PAGE_SIZE), UL) 23 + #endif 24 + 25 + #endif
+1
arch/csky/include/asm/mmu.h
··· 7 7 typedef struct { 8 8 atomic64_t asid; 9 9 void *vdso; 10 + cpumask_t icache_stale_mask; 10 11 } mm_context_t; 11 12 12 13 #endif /* __ASM_CSKY_MMU_H */
+2
arch/csky/include/asm/mmu_context.h
··· 43 43 44 44 TLBMISS_HANDLER_SETUP_PGD(next->pgd); 45 45 write_mmu_entryhi(next->context.asid.counter); 46 + 47 + flush_icache_deferred(next); 46 48 } 47 49 #endif /* __ASM_CSKY_MMU_CONTEXT_H */
+34
arch/csky/include/asm/pci.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + 3 + #ifndef __ASM_CSKY_PCI_H 4 + #define __ASM_CSKY_PCI_H 5 + 6 + #include <linux/types.h> 7 + #include <linux/slab.h> 8 + #include <linux/dma-mapping.h> 9 + 10 + #include <asm/io.h> 11 + 12 + #define PCIBIOS_MIN_IO 0 13 + #define PCIBIOS_MIN_MEM 0 14 + 15 + /* C-SKY shim does not initialize PCI bus */ 16 + #define pcibios_assign_all_busses() 1 17 + 18 + extern int isa_dma_bridge_buggy; 19 + 20 + #ifdef CONFIG_PCI 21 + static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) 22 + { 23 + /* no legacy IRQ on csky */ 24 + return -ENODEV; 25 + } 26 + 27 + static inline int pci_proc_domain(struct pci_bus *bus) 28 + { 29 + /* always show the domain in /proc */ 30 + return 1; 31 + } 32 + #endif /* CONFIG_PCI */ 33 + 34 + #endif /* __ASM_CSKY_PCI_H */
+1 -5
arch/csky/include/asm/pgtable.h
··· 5 5 #define __ASM_CSKY_PGTABLE_H 6 6 7 7 #include <asm/fixmap.h> 8 + #include <asm/memory.h> 8 9 #include <asm/addrspace.h> 9 10 #include <abi/pgtable-bits.h> 10 11 #include <asm-generic/pgtable-nopmd.h> ··· 16 15 17 16 #define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE) 18 17 #define FIRST_USER_ADDRESS 0UL 19 - 20 - #define PKMAP_BASE (0xff800000) 21 - 22 - #define VMALLOC_START (0xc0008000) 23 - #define VMALLOC_END (PKMAP_BASE - 2*PAGE_SIZE) 24 18 25 19 /* 26 20 * C-SKY is two-level paging structure:
+29
arch/csky/include/asm/stackprotector.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _ASM_STACKPROTECTOR_H 3 + #define _ASM_STACKPROTECTOR_H 1 4 + 5 + #include <linux/random.h> 6 + #include <linux/version.h> 7 + 8 + extern unsigned long __stack_chk_guard; 9 + 10 + /* 11 + * Initialize the stackprotector canary value. 12 + * 13 + * NOTE: this must only be called from functions that never return, 14 + * and it must always be inlined. 15 + */ 16 + static __always_inline void boot_init_stack_canary(void) 17 + { 18 + unsigned long canary; 19 + 20 + /* Try to get a semi random initial value. */ 21 + get_random_bytes(&canary, sizeof(canary)); 22 + canary ^= LINUX_VERSION_CODE; 23 + canary &= CANARY_MASK; 24 + 25 + current->stack_canary = canary; 26 + __stack_chk_guard = current->stack_canary; 27 + } 28 + 29 + #endif /* __ASM_SH_STACKPROTECTOR_H */
+24
arch/csky/include/asm/tcm.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + 3 + #ifndef __ASM_CSKY_TCM_H 4 + #define __ASM_CSKY_TCM_H 5 + 6 + #ifndef CONFIG_HAVE_TCM 7 + #error "You should not be including tcm.h unless you have a TCM!" 8 + #endif 9 + 10 + #include <linux/compiler.h> 11 + 12 + /* Tag variables with this */ 13 + #define __tcmdata __section(.tcm.data) 14 + /* Tag constants with this */ 15 + #define __tcmconst __section(.tcm.rodata) 16 + /* Tag functions inside TCM called from outside TCM with this */ 17 + #define __tcmfunc __section(.tcm.text) noinline 18 + /* Tag function inside TCM called from inside TCM with this */ 19 + #define __tcmlocalfunc __section(.tcm.text) 20 + 21 + void *tcm_alloc(size_t len); 22 + void tcm_free(void *addr, size_t len); 23 + 24 + #endif
+3
arch/csky/include/uapi/asm/unistd.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 2 2 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 3 4 + #define __ARCH_WANT_STAT64 5 + #define __ARCH_WANT_NEW_STAT 4 6 #define __ARCH_WANT_SYS_CLONE 7 + #define __ARCH_WANT_SYS_CLONE3 5 8 #define __ARCH_WANT_SET_GET_RLIMIT 6 9 #define __ARCH_WANT_TIME32_SYSCALLS 7 10 #include <asm-generic/unistd.h>
+6 -2
arch/csky/kernel/atomic.S
··· 17 17 mfcr a3, epc 18 18 addi a3, TRAP0_SIZE 19 19 20 - subi sp, 8 20 + subi sp, 16 21 21 stw a3, (sp, 0) 22 22 mfcr a3, epsr 23 23 stw a3, (sp, 4) 24 + mfcr a3, usp 25 + stw a3, (sp, 8) 24 26 25 27 psrset ee 26 28 #ifdef CONFIG_CPU_HAS_LDSTEX ··· 49 47 mtcr a3, epc 50 48 ldw a3, (sp, 4) 51 49 mtcr a3, epsr 52 - addi sp, 8 50 + ldw a3, (sp, 8) 51 + mtcr a3, usp 52 + addi sp, 16 53 53 KSPTOUSP 54 54 rte 55 55 END(csky_cmpxchg)
+10 -3
arch/csky/kernel/process.c
··· 16 16 17 17 struct cpuinfo_csky cpu_data[NR_CPUS]; 18 18 19 + #ifdef CONFIG_STACKPROTECTOR 20 + #include <linux/stackprotector.h> 21 + unsigned long __stack_chk_guard __read_mostly; 22 + EXPORT_SYMBOL(__stack_chk_guard); 23 + #endif 24 + 19 25 asmlinkage void ret_from_fork(void); 20 26 asmlinkage void ret_from_kernel_thread(void); 21 27 ··· 40 34 return sw->r15; 41 35 } 42 36 43 - int copy_thread(unsigned long clone_flags, 37 + int copy_thread_tls(unsigned long clone_flags, 44 38 unsigned long usp, 45 39 unsigned long kthread_arg, 46 - struct task_struct *p) 40 + struct task_struct *p, 41 + unsigned long tls) 47 42 { 48 43 struct switch_stack *childstack; 49 44 struct pt_regs *childregs = task_pt_regs(p); ··· 71 64 childregs->usp = usp; 72 65 if (clone_flags & CLONE_SETTLS) 73 66 task_thread_info(p)->tp_value = childregs->tls 74 - = childregs->regs[0]; 67 + = tls; 75 68 76 69 childregs->a0 = 0; 77 70 childstack->r15 = (unsigned long) ret_from_fork;
+2 -3
arch/csky/kernel/setup.c
··· 47 47 signed long size; 48 48 49 49 memblock_reserve(__pa(_stext), _end - _stext); 50 - #ifdef CONFIG_BLK_DEV_INITRD 51 - memblock_reserve(__pa(initrd_start), initrd_end - initrd_start); 52 - #endif 53 50 54 51 early_init_fdt_reserve_self(); 55 52 early_init_fdt_scan_reserved_mem(); ··· 129 132 #endif 130 133 131 134 sparse_init(); 135 + 136 + fixaddr_init(); 132 137 133 138 #ifdef CONFIG_HIGHMEM 134 139 kmap_init();
+1 -1
arch/csky/kernel/smp.c
··· 120 120 int rc; 121 121 122 122 if (ipi_irq == 0) 123 - panic("%s IRQ mapping failed\n", __func__); 123 + return; 124 124 125 125 rc = request_percpu_irq(ipi_irq, handle_ipi, "IPI Interrupt", 126 126 &ipi_dummy_dev);
+1 -1
arch/csky/kernel/time.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 3 3 4 - #include <linux/clk-provider.h> 5 4 #include <linux/clocksource.h> 5 + #include <linux/of_clk.h> 6 6 7 7 void __init time_init(void) 8 8 {
+49
arch/csky/kernel/vmlinux.lds.S
··· 2 2 3 3 #include <asm/vmlinux.lds.h> 4 4 #include <asm/page.h> 5 + #include <asm/memory.h> 5 6 6 7 OUTPUT_ARCH(csky) 7 8 ENTRY(_start) ··· 53 52 RO_DATA(PAGE_SIZE) 54 53 RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) 55 54 _edata = .; 55 + 56 + #ifdef CONFIG_HAVE_TCM 57 + .tcm_start : { 58 + . = ALIGN(PAGE_SIZE); 59 + __tcm_start = .; 60 + } 61 + 62 + .text_data_tcm FIXADDR_TCM : AT(__tcm_start) 63 + { 64 + . = ALIGN(4); 65 + __stcm_text_data = .; 66 + *(.tcm.text) 67 + *(.tcm.rodata) 68 + #ifndef CONFIG_HAVE_DTCM 69 + *(.tcm.data) 70 + #endif 71 + . = ALIGN(4); 72 + __etcm_text_data = .; 73 + } 74 + 75 + . = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_data_tcm); 76 + 77 + #ifdef CONFIG_HAVE_DTCM 78 + #define ITCM_SIZE CONFIG_ITCM_NR_PAGES * PAGE_SIZE 79 + 80 + .dtcm_start : { 81 + __dtcm_start = .; 82 + } 83 + 84 + .data_tcm FIXADDR_TCM + ITCM_SIZE : AT(__dtcm_start) 85 + { 86 + . = ALIGN(4); 87 + __stcm_data = .; 88 + *(.tcm.data) 89 + . = ALIGN(4); 90 + __etcm_data = .; 91 + } 92 + 93 + . = ADDR(.dtcm_start) + SIZEOF(.data_tcm); 94 + 95 + .tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_tcm)) { 96 + #else 97 + .tcm_end : AT(ADDR(.tcm_start) + SIZEOF(.text_data_tcm)) { 98 + #endif 99 + . = ALIGN(PAGE_SIZE); 100 + __tcm_end = .; 101 + } 102 + #endif 56 103 57 104 EXCEPTION_TABLE(L1_CACHE_BYTES) 58 105 BSS_SECTION(L1_CACHE_BYTES, PAGE_SIZE, L1_CACHE_BYTES)
+3
arch/csky/mm/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 ifeq ($(CONFIG_CPU_HAS_CACHEV2),y) 3 3 obj-y += cachev2.o 4 + CFLAGS_REMOVE_cachev2.o = $(CC_FLAGS_FTRACE) 4 5 else 5 6 obj-y += cachev1.o 7 + CFLAGS_REMOVE_cachev1.o = $(CC_FLAGS_FTRACE) 6 8 endif 7 9 8 10 obj-y += dma-mapping.o ··· 16 14 obj-y += tlb.o 17 15 obj-y += asid.o 18 16 obj-y += context.o 17 + obj-$(CONFIG_HAVE_TCM) += tcm.o
+5
arch/csky/mm/cachev1.c
··· 94 94 cache_op_all(INS_CACHE|CACHE_INV, 0); 95 95 } 96 96 97 + void local_icache_inv_all(void *priv) 98 + { 99 + cache_op_all(INS_CACHE|CACHE_INV, 0); 100 + } 101 + 97 102 void dcache_wb_range(unsigned long start, unsigned long end) 98 103 { 99 104 cache_op_range(start, end, DATA_CACHE|CACHE_CLR, 0);
+23 -24
arch/csky/mm/cachev2.c
··· 3 3 4 4 #include <linux/spinlock.h> 5 5 #include <linux/smp.h> 6 + #include <linux/mm.h> 6 7 #include <asm/cache.h> 7 8 #include <asm/barrier.h> 8 9 9 - inline void dcache_wb_line(unsigned long start) 10 + #define INS_CACHE (1 << 0) 11 + #define CACHE_INV (1 << 4) 12 + 13 + void local_icache_inv_all(void *priv) 10 14 { 11 - asm volatile("dcache.cval1 %0\n"::"r"(start):"memory"); 15 + mtcr("cr17", INS_CACHE|CACHE_INV); 12 16 sync_is(); 13 17 } 14 18 19 + void icache_inv_all(void) 20 + { 21 + on_each_cpu(local_icache_inv_all, NULL, 1); 22 + } 23 + 24 + #ifdef CONFIG_CPU_HAS_ICACHE_INS 15 25 void icache_inv_range(unsigned long start, unsigned long end) 16 26 { 17 27 unsigned long i = start & ~(L1_CACHE_BYTES - 1); ··· 30 20 asm volatile("icache.iva %0\n"::"r"(i):"memory"); 31 21 sync_is(); 32 22 } 33 - 34 - void icache_inv_all(void) 23 + #else 24 + void icache_inv_range(unsigned long start, unsigned long end) 35 25 { 36 - asm volatile("icache.ialls\n":::"memory"); 26 + icache_inv_all(); 27 + } 28 + #endif 29 + 30 + inline void dcache_wb_line(unsigned long start) 31 + { 32 + asm volatile("dcache.cval1 %0\n"::"r"(start):"memory"); 37 33 sync_is(); 38 34 } 39 35 ··· 52 36 sync_is(); 53 37 } 54 38 55 - void dcache_inv_range(unsigned long start, unsigned long end) 56 - { 57 - unsigned long i = start & ~(L1_CACHE_BYTES - 1); 58 - 59 - for (; i < end; i += L1_CACHE_BYTES) 60 - asm volatile("dcache.civa %0\n"::"r"(i):"memory"); 61 - sync_is(); 62 - } 63 - 64 39 void cache_wbinv_range(unsigned long start, unsigned long end) 65 40 { 66 - unsigned long i = start & ~(L1_CACHE_BYTES - 1); 67 - 68 - for (; i < end; i += L1_CACHE_BYTES) 69 - asm volatile("dcache.cval1 %0\n"::"r"(i):"memory"); 70 - sync_is(); 71 - 72 - i = start & ~(L1_CACHE_BYTES - 1); 73 - for (; i < end; i += L1_CACHE_BYTES) 74 - asm volatile("icache.iva %0\n"::"r"(i):"memory"); 75 - sync_is(); 41 + dcache_wb_range(start, end); 42 + icache_inv_range(start, end); 76 43 } 77 44 EXPORT_SYMBOL(cache_wbinv_range); 
78 45
+3 -59
arch/csky/mm/highmem.c
··· 117 117 return pte_page(*pte); 118 118 } 119 119 120 - static void __init fixrange_init(unsigned long start, unsigned long end, 121 - pgd_t *pgd_base) 122 - { 123 - #ifdef CONFIG_HIGHMEM 124 - pgd_t *pgd; 125 - pud_t *pud; 126 - pmd_t *pmd; 127 - pte_t *pte; 128 - int i, j, k; 129 - unsigned long vaddr; 130 - 131 - vaddr = start; 132 - i = __pgd_offset(vaddr); 133 - j = __pud_offset(vaddr); 134 - k = __pmd_offset(vaddr); 135 - pgd = pgd_base + i; 136 - 137 - for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) { 138 - pud = (pud_t *)pgd; 139 - for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) { 140 - pmd = (pmd_t *)pud; 141 - for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) { 142 - if (pmd_none(*pmd)) { 143 - pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); 144 - if (!pte) 145 - panic("%s: Failed to allocate %lu bytes align=%lx\n", 146 - __func__, PAGE_SIZE, 147 - PAGE_SIZE); 148 - 149 - set_pmd(pmd, __pmd(__pa(pte))); 150 - BUG_ON(pte != pte_offset_kernel(pmd, 0)); 151 - } 152 - vaddr += PMD_SIZE; 153 - } 154 - k = 0; 155 - } 156 - j = 0; 157 - } 158 - #endif 159 - } 160 - 161 - void __init fixaddr_kmap_pages_init(void) 120 + static void __init kmap_pages_init(void) 162 121 { 163 122 unsigned long vaddr; 164 - pgd_t *pgd_base; 165 - #ifdef CONFIG_HIGHMEM 166 123 pgd_t *pgd; 167 124 pmd_t *pmd; 168 125 pud_t *pud; 169 126 pte_t *pte; 170 - #endif 171 - pgd_base = swapper_pg_dir; 172 127 173 - /* 174 - * Fixed mappings: 175 - */ 176 - vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; 177 - fixrange_init(vaddr, 0, pgd_base); 178 - 179 - #ifdef CONFIG_HIGHMEM 180 - /* 181 - * Permanent kmaps: 182 - */ 183 128 vaddr = PKMAP_BASE; 184 - fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); 129 + fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir); 185 130 186 131 pgd = swapper_pg_dir + __pgd_offset(vaddr); 187 132 pud = (pud_t *)pgd; 188 133 pmd = pmd_offset(pud, vaddr); 189 134 pte = 
pte_offset_kernel(pmd, vaddr); 190 135 pkmap_page_table = pte; 191 - #endif 192 136 } 193 137 194 138 void __init kmap_init(void) 195 139 { 196 140 unsigned long vaddr; 197 141 198 - fixaddr_kmap_pages_init(); 142 + kmap_pages_init(); 199 143 200 144 vaddr = __fix_to_virt(FIX_KMAP_BEGIN); 201 145
+92
arch/csky/mm/init.c
··· 19 19 #include <linux/swap.h> 20 20 #include <linux/proc_fs.h> 21 21 #include <linux/pfn.h> 22 + #include <linux/initrd.h> 22 23 23 24 #include <asm/setup.h> 24 25 #include <asm/cachectl.h> ··· 32 31 33 32 pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss; 34 33 pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss; 34 + EXPORT_SYMBOL(invalid_pte_table); 35 35 unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] 36 36 __page_aligned_bss; 37 37 EXPORT_SYMBOL(empty_zero_page); 38 + 39 + #ifdef CONFIG_BLK_DEV_INITRD 40 + static void __init setup_initrd(void) 41 + { 42 + unsigned long size; 43 + 44 + if (initrd_start >= initrd_end) { 45 + pr_err("initrd not found or empty"); 46 + goto disable; 47 + } 48 + 49 + if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) { 50 + pr_err("initrd extends beyond end of memory"); 51 + goto disable; 52 + } 53 + 54 + size = initrd_end - initrd_start; 55 + 56 + if (memblock_is_region_reserved(__pa(initrd_start), size)) { 57 + pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region", 58 + __pa(initrd_start), size); 59 + goto disable; 60 + } 61 + 62 + memblock_reserve(__pa(initrd_start), size); 63 + 64 + pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n", 65 + (void *)(initrd_start), size); 66 + 67 + initrd_below_start_ok = 1; 68 + 69 + return; 70 + 71 + disable: 72 + initrd_start = initrd_end = 0; 73 + 74 + pr_err(" - disabling initrd\n"); 75 + } 76 + #endif 38 77 39 78 void __init mem_init(void) 40 79 { ··· 86 45 max_mapnr = max_low_pfn; 87 46 #endif 88 47 high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT); 48 + 49 + #ifdef CONFIG_BLK_DEV_INITRD 50 + setup_initrd(); 51 + #endif 89 52 90 53 memblock_free_all(); 91 54 ··· 145 100 146 101 /* Setup page mask to 4k */ 147 102 write_mmu_pagemask(0); 103 + } 104 + 105 + void __init fixrange_init(unsigned long start, unsigned long end, 106 + pgd_t *pgd_base) 107 + { 108 + pgd_t *pgd; 109 + pud_t *pud; 110 + pmd_t *pmd; 111 + pte_t *pte; 112 + int i, j, k; 113 + unsigned 
long vaddr; 114 + 115 + vaddr = start; 116 + i = __pgd_offset(vaddr); 117 + j = __pud_offset(vaddr); 118 + k = __pmd_offset(vaddr); 119 + pgd = pgd_base + i; 120 + 121 + for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) { 122 + pud = (pud_t *)pgd; 123 + for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) { 124 + pmd = (pmd_t *)pud; 125 + for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) { 126 + if (pmd_none(*pmd)) { 127 + pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE); 128 + if (!pte) 129 + panic("%s: Failed to allocate %lu bytes align=%lx\n", 130 + __func__, PAGE_SIZE, 131 + PAGE_SIZE); 132 + 133 + set_pmd(pmd, __pmd(__pa(pte))); 134 + BUG_ON(pte != pte_offset_kernel(pmd, 0)); 135 + } 136 + vaddr += PMD_SIZE; 137 + } 138 + k = 0; 139 + } 140 + j = 0; 141 + } 142 + } 143 + 144 + void __init fixaddr_init(void) 145 + { 146 + unsigned long vaddr; 147 + 148 + vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; 149 + fixrange_init(vaddr, vaddr + PMD_SIZE, swapper_pg_dir); 148 150 }
+5 -8
arch/csky/mm/syscache.c
··· 3 3 4 4 #include <linux/syscalls.h> 5 5 #include <asm/page.h> 6 - #include <asm/cache.h> 6 + #include <asm/cacheflush.h> 7 7 #include <asm/cachectl.h> 8 8 9 9 SYSCALL_DEFINE3(cacheflush, ··· 13 13 { 14 14 switch (cache) { 15 15 case ICACHE: 16 - icache_inv_range((unsigned long)addr, 17 - (unsigned long)addr + bytes); 18 - break; 16 + case BCACHE: 17 + flush_icache_mm_range(current->mm, 18 + (unsigned long)addr, 19 + (unsigned long)addr + bytes); 19 20 case DCACHE: 20 21 dcache_wb_range((unsigned long)addr, 21 22 (unsigned long)addr + bytes); 22 - break; 23 - case BCACHE: 24 - cache_wbinv_range((unsigned long)addr, 25 - (unsigned long)addr + bytes); 26 23 break; 27 24 default: 28 25 return -EINVAL;
+169
arch/csky/mm/tcm.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include <linux/highmem.h> 4 + #include <linux/genalloc.h> 5 + #include <asm/tlbflush.h> 6 + #include <asm/fixmap.h> 7 + 8 + #if (CONFIG_ITCM_RAM_BASE == 0xffffffff) 9 + #error "You should define ITCM_RAM_BASE" 10 + #endif 11 + 12 + #ifdef CONFIG_HAVE_DTCM 13 + #if (CONFIG_DTCM_RAM_BASE == 0xffffffff) 14 + #error "You should define DTCM_RAM_BASE" 15 + #endif 16 + 17 + #if (CONFIG_DTCM_RAM_BASE == CONFIG_ITCM_RAM_BASE) 18 + #error "You should define correct DTCM_RAM_BASE" 19 + #endif 20 + #endif 21 + 22 + extern char __tcm_start, __tcm_end, __dtcm_start; 23 + 24 + static struct gen_pool *tcm_pool; 25 + 26 + static void __init tcm_mapping_init(void) 27 + { 28 + pte_t *tcm_pte; 29 + unsigned long vaddr, paddr; 30 + int i; 31 + 32 + paddr = CONFIG_ITCM_RAM_BASE; 33 + 34 + if (pfn_valid(PFN_DOWN(CONFIG_ITCM_RAM_BASE))) 35 + goto panic; 36 + 37 + #ifndef CONFIG_HAVE_DTCM 38 + for (i = 0; i < TCM_NR_PAGES; i++) { 39 + #else 40 + for (i = 0; i < CONFIG_ITCM_NR_PAGES; i++) { 41 + #endif 42 + vaddr = __fix_to_virt(FIX_TCM - i); 43 + 44 + tcm_pte = 45 + pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr); 46 + 47 + set_pte(tcm_pte, pfn_pte(__phys_to_pfn(paddr), PAGE_KERNEL)); 48 + 49 + flush_tlb_one(vaddr); 50 + 51 + paddr = paddr + PAGE_SIZE; 52 + } 53 + 54 + #ifdef CONFIG_HAVE_DTCM 55 + if (pfn_valid(PFN_DOWN(CONFIG_DTCM_RAM_BASE))) 56 + goto panic; 57 + 58 + paddr = CONFIG_DTCM_RAM_BASE; 59 + 60 + for (i = 0; i < CONFIG_DTCM_NR_PAGES; i++) { 61 + vaddr = __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES - i); 62 + 63 + tcm_pte = 64 + pte_offset_kernel((pmd_t *) pgd_offset_k(vaddr), vaddr); 65 + 66 + set_pte(tcm_pte, pfn_pte(__phys_to_pfn(paddr), PAGE_KERNEL)); 67 + 68 + flush_tlb_one(vaddr); 69 + 70 + paddr = paddr + PAGE_SIZE; 71 + } 72 + #endif 73 + 74 + #ifndef CONFIG_HAVE_DTCM 75 + memcpy((void *)__fix_to_virt(FIX_TCM), 76 + &__tcm_start, &__tcm_end - &__tcm_start); 77 + 78 + pr_info("%s: mapping tcm va:0x%08lx to 
pa:0x%08x\n", 79 + __func__, __fix_to_virt(FIX_TCM), CONFIG_ITCM_RAM_BASE); 80 + 81 + pr_info("%s: __tcm_start va:0x%08lx size:%d\n", 82 + __func__, (unsigned long)&__tcm_start, &__tcm_end - &__tcm_start); 83 + #else 84 + memcpy((void *)__fix_to_virt(FIX_TCM), 85 + &__tcm_start, &__dtcm_start - &__tcm_start); 86 + 87 + pr_info("%s: mapping itcm va:0x%08lx to pa:0x%08x\n", 88 + __func__, __fix_to_virt(FIX_TCM), CONFIG_ITCM_RAM_BASE); 89 + 90 + pr_info("%s: __itcm_start va:0x%08lx size:%d\n", 91 + __func__, (unsigned long)&__tcm_start, &__dtcm_start - &__tcm_start); 92 + 93 + memcpy((void *)__fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES), 94 + &__dtcm_start, &__tcm_end - &__dtcm_start); 95 + 96 + pr_info("%s: mapping dtcm va:0x%08lx to pa:0x%08x\n", 97 + __func__, __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES), 98 + CONFIG_DTCM_RAM_BASE); 99 + 100 + pr_info("%s: __dtcm_start va:0x%08lx size:%d\n", 101 + __func__, (unsigned long)&__dtcm_start, &__tcm_end - &__dtcm_start); 102 + 103 + #endif 104 + return; 105 + panic: 106 + panic("TCM init error"); 107 + } 108 + 109 + void *tcm_alloc(size_t len) 110 + { 111 + unsigned long vaddr; 112 + 113 + if (!tcm_pool) 114 + return NULL; 115 + 116 + vaddr = gen_pool_alloc(tcm_pool, len); 117 + if (!vaddr) 118 + return NULL; 119 + 120 + return (void *) vaddr; 121 + } 122 + EXPORT_SYMBOL(tcm_alloc); 123 + 124 + void tcm_free(void *addr, size_t len) 125 + { 126 + gen_pool_free(tcm_pool, (unsigned long) addr, len); 127 + } 128 + EXPORT_SYMBOL(tcm_free); 129 + 130 + static int __init tcm_setup_pool(void) 131 + { 132 + #ifndef CONFIG_HAVE_DTCM 133 + u32 pool_size = (u32) (TCM_NR_PAGES * PAGE_SIZE) 134 + - (u32) (&__tcm_end - &__tcm_start); 135 + 136 + u32 tcm_pool_start = __fix_to_virt(FIX_TCM) 137 + + (u32) (&__tcm_end - &__tcm_start); 138 + #else 139 + u32 pool_size = (u32) (CONFIG_DTCM_NR_PAGES * PAGE_SIZE) 140 + - (u32) (&__tcm_end - &__dtcm_start); 141 + 142 + u32 tcm_pool_start = __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES) 143 + + 
(u32) (&__tcm_end - &__dtcm_start); 144 + #endif 145 + int ret; 146 + 147 + tcm_pool = gen_pool_create(2, -1); 148 + 149 + ret = gen_pool_add(tcm_pool, tcm_pool_start, pool_size, -1); 150 + if (ret) { 151 + pr_err("%s: gen_pool add failed!\n", __func__); 152 + return ret; 153 + } 154 + 155 + pr_info("%s: Added %d bytes @ 0x%08x to memory pool\n", 156 + __func__, pool_size, tcm_pool_start); 157 + 158 + return 0; 159 + } 160 + 161 + static int __init tcm_init(void) 162 + { 163 + tcm_mapping_init(); 164 + 165 + tcm_setup_pool(); 166 + 167 + return 0; 168 + } 169 + arch_initcall(tcm_init);