Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'frv' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-frv

* 'frv' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-frv:
FRV: Use generic show_interrupts()
FRV: Convert genirq namespace
frv: Select GENERIC_HARDIRQS_NO_DEPRECATED
frv: Convert cpu irq_chip to new functions
frv: Convert mb93493 irq_chip to new functions
frv: Convert mb93093 irq_chip to new functions
frv: Convert mb93091 irq_chip to new functions
frv: Fix typo from __do_IRQ overhaul
frv: Remove stale irq_chip.end
FRV: Do some cleanups
FRV: Missing node arg in alloc_thread_info_node() macro
NOMMU: implement access_remote_vm
NOMMU: support SMP dynamic percpu_alloc
NOMMU: percpu should use is_vmalloc_addr().

+76 -27
-1
arch/frv/Kconfig
··· 363 363 364 364 config ARCH_SUSPEND_POSSIBLE 365 365 def_bool y 366 - depends on !SMP 367 366 368 367 source kernel/power/Kconfig 369 368 endmenu
-9
arch/frv/include/asm/system.h
··· 45 45 #define wmb() asm volatile ("membar" : : :"memory") 46 46 #define read_barrier_depends() do { } while (0) 47 47 48 - #ifdef CONFIG_SMP 49 - #define smp_mb() mb() 50 - #define smp_rmb() rmb() 51 - #define smp_wmb() wmb() 52 - #define smp_read_barrier_depends() read_barrier_depends() 53 - #define set_mb(var, value) \ 54 - do { xchg(&var, (value)); } while (0) 55 - #else 56 48 #define smp_mb() barrier() 57 49 #define smp_rmb() barrier() 58 50 #define smp_wmb() barrier() 59 51 #define smp_read_barrier_depends() do {} while(0) 60 52 #define set_mb(var, value) \ 61 53 do { var = (value); barrier(); } while (0) 62 - #endif 63 54 64 55 extern void die_if_kernel(const char *, ...) __attribute__((format(printf, 1, 2))); 65 56 extern void free_initmem(void);
+3 -1
arch/frv/include/asm/thread_info.h
··· 21 21 22 22 #define THREAD_SIZE 8192 23 23 24 + #define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR 25 + 24 26 /* 25 27 * low level task data that entry.S needs immediate access to 26 28 * - this struct should fit entirely inside of one cache line ··· 89 87 #define alloc_thread_info_node(tsk, node) \ 90 88 kzalloc_node(THREAD_SIZE, GFP_KERNEL, node) 91 89 #else 92 - #define alloc_thread_info_node(tsk) \ 90 + #define alloc_thread_info_node(tsk, node) \ 93 91 kmalloc_node(THREAD_SIZE, GFP_KERNEL, node) 94 92 #endif 95 93
+1 -1
arch/frv/kernel/irq-mb93091.c
··· 47 47 48 48 static void frv_fpga_ack(struct irq_data *d) 49 49 { 50 - __clr_IFR(1 << (irq - IRQ_BASE_FPGA)); 50 + __clr_IFR(1 << (d->irq - IRQ_BASE_FPGA)); 51 51 } 52 52 53 53 static void frv_fpga_mask_ack(struct irq_data *d)
+32
include/linux/vmalloc.h
··· 95 95 96 96 extern int map_vm_area(struct vm_struct *area, pgprot_t prot, 97 97 struct page ***pages); 98 + #ifdef CONFIG_MMU 98 99 extern int map_kernel_range_noflush(unsigned long start, unsigned long size, 99 100 pgprot_t prot, struct page **pages); 100 101 extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size); 101 102 extern void unmap_kernel_range(unsigned long addr, unsigned long size); 103 + #else 104 + static inline int 105 + map_kernel_range_noflush(unsigned long start, unsigned long size, 106 + pgprot_t prot, struct page **pages) 107 + { 108 + return size >> PAGE_SHIFT; 109 + } 110 + static inline void 111 + unmap_kernel_range_noflush(unsigned long addr, unsigned long size) 112 + { 113 + } 114 + static inline void 115 + unmap_kernel_range(unsigned long addr, unsigned long size) 116 + { 117 + } 118 + #endif 102 119 103 120 /* Allocate/destroy a 'vmalloc' VM area. */ 104 121 extern struct vm_struct *alloc_vm_area(size_t size); ··· 133 116 extern __init void vm_area_register_early(struct vm_struct *vm, size_t align); 134 117 135 118 #ifdef CONFIG_SMP 119 + # ifdef CONFIG_MMU 136 120 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, 137 121 const size_t *sizes, int nr_vms, 138 122 size_t align); 139 123 140 124 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms); 125 + # else 126 + static inline struct vm_struct ** 127 + pcpu_get_vm_areas(const unsigned long *offsets, 128 + const size_t *sizes, int nr_vms, 129 + size_t align) 130 + { 131 + return NULL; 132 + } 133 + 134 + static inline void 135 + pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) 136 + { 137 + } 138 + # endif 141 139 #endif 142 140 143 141 #endif /* _LINUX_VMALLOC_H */
+39 -13
mm/nommu.c
··· 1971 1971 } 1972 1972 EXPORT_SYMBOL(filemap_fault); 1973 1973 1974 - /* 1975 - * Access another process' address space. 1976 - * - source/target buffer must be kernel space 1977 - */ 1978 - int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write) 1974 + static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, 1975 + unsigned long addr, void *buf, int len, int write) 1979 1976 { 1980 1977 struct vm_area_struct *vma; 1981 - struct mm_struct *mm; 1982 - 1983 - if (addr + len < addr) 1984 - return 0; 1985 - 1986 - mm = get_task_mm(tsk); 1987 - if (!mm) 1988 - return 0; 1989 1978 1990 1979 down_read(&mm->mmap_sem); 1991 1980 ··· 1999 2010 } 2000 2011 2001 2012 up_read(&mm->mmap_sem); 2013 + 2014 + return len; 2015 + } 2016 + 2017 + /** 2018 + * @access_remote_vm - access another process' address space 2019 + * @mm: the mm_struct of the target address space 2020 + * @addr: start address to access 2021 + * @buf: source or destination buffer 2022 + * @len: number of bytes to transfer 2023 + * @write: whether the access is a write 2024 + * 2025 + * The caller must hold a reference on @mm. 2026 + */ 2027 + int access_remote_vm(struct mm_struct *mm, unsigned long addr, 2028 + void *buf, int len, int write) 2029 + { 2030 + return __access_remote_vm(NULL, mm, addr, buf, len, write); 2031 + } 2032 + 2033 + /* 2034 + * Access another process' address space. 2035 + * - source/target buffer must be kernel space 2036 + */ 2037 + int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write) 2038 + { 2039 + struct mm_struct *mm; 2040 + 2041 + if (addr + len < addr) 2042 + return 0; 2043 + 2044 + mm = get_task_mm(tsk); 2045 + if (!mm) 2046 + return 0; 2047 + 2048 + len = __access_remote_vm(tsk, mm, addr, buf, len, write); 2049 + 2002 2050 mmput(mm); 2003 2051 return len; 2004 2052 }
+1 -2
mm/percpu.c
··· 1008 1008 } 1009 1009 1010 1010 if (in_first_chunk) { 1011 - if ((unsigned long)addr < VMALLOC_START || 1012 - (unsigned long)addr >= VMALLOC_END) 1011 + if (!is_vmalloc_addr(addr)) 1013 1012 return __pa(addr); 1014 1013 else 1015 1014 return page_to_phys(vmalloc_to_page(addr));