Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/upstream-linus

* 'upstream' of git://git.linux-mips.org/pub/scm/upstream-linus: (26 commits)
MIPS: Alchemy: Fix reset for MTX-1 and XXS1500
MIPS: MTX-1: Make au1000_eth probe all PHY addresses
MIPS: Jz4740: Add HAVE_CLK
MIPS: Move idle task creation to work queue
MIPS, Perf-events: Use unsigned delta for right shift in event update
MIPS, Perf-events: Work with the new callchain interface
MIPS, Perf-events: Fix event check in validate_event()
MIPS, Perf-events: Work with the new PMU interface
MIPS, Perf-events: Work with irq_work
MIPS: Fix always CONFIG_LOONGSON_UART_BASE=y
MIPS: Loongson: Fix potentially wrong string handling
MIPS: Fix GCC-4.6 'set but not used' warning in arch/mips/mm/init.c
MIPS: Fix GCC-4.6 'set but not used' warning in ieee754int.h
MIPS: Remove unused code from arch/mips/kernel/syscall.c
MIPS: Fix GCC-4.6 'set but not used' warning in signal*.c
MIPS: MSP: Fix MSP71xx bpci interrupt handler return value
MIPS: Select R4K timer lib for all MSP platforms
MIPS: Loongson: Remove ad-hoc cmdline default
MIPS: Clear the correct flag in sysmips(MIPS_FIXADE, ...).
MIPS: Add an unreachable return statement to satisfy buggy GCCs.
...

+335 -309
+4
arch/mips/Kconfig
···
         select HAVE_GENERIC_DMA_COHERENT
         select HAVE_IDE
         select HAVE_OPROFILE
+        select HAVE_IRQ_WORK
         select HAVE_PERF_EVENTS
         select PERF_USE_VMALLOC
         select HAVE_ARCH_KGDB
···
         select ARCH_REQUIRE_GPIOLIB
         select SYS_HAS_EARLY_PRINTK
         select HAVE_PWM
+        select HAVE_CLK
 
 config LASAT
         bool "LASAT Networks platforms"
···
 config PMC_MSP
         bool "PMC-Sierra MSP chipsets"
         depends on EXPERIMENTAL
+        select CEVT_R4K
+        select CSRC_R4K
         select DMA_NONCOHERENT
         select SWAP_IO_SPACE
         select NO_EXCEPT_FILL
+2 -2
arch/mips/alchemy/mtx-1/board_setup.c
···
 
 static void mtx1_reset(char *c)
 {
-        /* Hit BCSR.SYSTEM_CONTROL[SW_RST] */
-        au_writel(0x00000000, 0xAE00001C);
+        /* Jump to the reset vector */
+        __asm__ __volatile__("jr\t%0"::"r"(0xbfc00000));
 }
 
 static void mtx1_power_off(void)
+9
arch/mips/alchemy/mtx-1/platform.c
···
 #include <linux/mtd/physmap.h>
 #include <mtd/mtd-abi.h>
 
+#include <asm/mach-au1x00/au1xxx_eth.h>
+
 static struct gpio_keys_button mtx1_gpio_button[] = {
         {
                 .gpio = 207,
···
         &mtx1_mtd,
 };
 
+static struct au1000_eth_platform_data mtx1_au1000_eth0_pdata = {
+        .phy_search_highest_addr        = 1,
+        .phy1_search_mac0               = 1,
+};
+
 static int __init mtx1_register_devices(void)
 {
         int rc;
+
+        au1xxx_override_eth_cfg(0, &mtx1_au1000_eth0_pdata);
 
         rc = gpio_request(mtx1_gpio_button[0].gpio,
                                 mtx1_gpio_button[0].desc);
+2 -2
arch/mips/alchemy/xxs1500/board_setup.c
···
 
 static void xxs1500_reset(char *c)
 {
-        /* Hit BCSR.SYSTEM_CONTROL[SW_RST] */
-        au_writel(0x00000000, 0xAE00001C);
+        /* Jump to the reset vector */
+        __asm__ __volatile__("jr\t%0"::"r"(0xbfc00000));
 }
 
 static void xxs1500_power_off(void)
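Both board fixes above replace a write to a board control register with a direct jump to 0xbfc00000. That address is the MIPS reset exception vector: physical address 0x1fc00000 (the boot ROM) seen through the uncached KSEG1 segment, so the jump restarts the firmware as if the CPU had been reset. A small sketch of the address arithmetic (macro names are illustrative, not from the patch):

/* KSEG1: unmapped, uncached window over the low 512 MB of physical space */
#define KSEG1_BASE      0xa0000000UL
#define RESET_VEC_PHYS  0x1fc00000UL    /* MIPS reset exception vector */

/* 0xa0000000 | 0x1fc00000 == 0xbfc00000, the target of the new "jr" */
#define RESET_VEC_KSEG1 (KSEG1_BASE | RESET_VEC_PHYS)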
+1 -11
arch/mips/include/asm/perf_event.h
···
 
 #ifndef __MIPS_PERF_EVENT_H__
 #define __MIPS_PERF_EVENT_H__
-
-/*
- * MIPS performance counters do not raise NMI upon overflow, a regular
- * interrupt will be signaled. Hence we can do the pending perf event
- * work at the tail of the irq handler.
- */
-static inline void
-set_perf_event_pending(void)
-{
-}
-
+/* Leave it empty here. The file is required by linux/perf_event.h */
 #endif /* __MIPS_PERF_EVENT_H__ */
+95 -84
arch/mips/kernel/ftrace.c
···
 #include <asm/cacheflush.h>
 #include <asm/uasm.h>
 
-/*
- * If the Instruction Pointer is in module space (0xc0000000), return true;
- * otherwise, it is in kernel space (0x80000000), return false.
- *
- * FIXME: This will not work when the kernel space and module space are the
- * same. If they are the same, we need to modify scripts/recordmcount.pl,
- * ftrace_make_nop/call() and the other related parts to ensure the
- * enabling/disabling of the calling site to _mcount is right for both kernel
- * and module.
- */
-
-static inline int in_module(unsigned long ip)
-{
-        return ip & 0x40000000;
-}
+#include <asm-generic/sections.h>
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 #define JAL 0x0c000000          /* jump & link: ip --> ra, jump to target */
 #define ADDR_MASK 0x03ffffff    /* op_code|addr : 31...26|25 ....0 */
 
-#define INSN_B_1F_4 0x10000004  /* b 1f; offset = 4 */
-#define INSN_B_1F_5 0x10000005  /* b 1f; offset = 5 */
 #define INSN_NOP 0x00000000     /* nop */
 #define INSN_JAL(addr)  \
         ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
···
 #endif
 }
 
+/*
+ * Check if the address is in kernel space
+ *
+ * Clone core_kernel_text() from kernel/extable.c, but doesn't call
+ * init_kernel_text() for Ftrace doesn't trace functions in init sections.
+ */
+static inline int in_kernel_space(unsigned long ip)
+{
+        if (ip >= (unsigned long)_stext &&
+            ip <= (unsigned long)_etext)
+                return 1;
+        return 0;
+}
+
 static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
 {
         int faulted;
···
         return 0;
 }
 
+/*
+ * The details about the calling site of mcount on MIPS
+ *
+ * 1. For kernel:
+ *
+ * move at, ra
+ * jal _mcount          --> nop
+ *
+ * 2. For modules:
+ *
+ * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
+ *
+ * lui v1, hi_16bit_of_mcount   --> b 1f (0x10000005)
+ * addiu v1, v1, low_16bit_of_mcount
+ * move at, ra
+ * move $12, ra_address
+ * jalr v1
+ *  sub sp, sp, 8
+ *                              1: offset = 5 instructions
+ * 2.2 For the Other situations
+ *
+ * lui v1, hi_16bit_of_mcount   --> b 1f (0x10000004)
+ * addiu v1, v1, low_16bit_of_mcount
+ * move at, ra
+ * jalr v1
+ *  nop | move $12, ra_address | sub sp, sp, 8
+ *                              1: offset = 4 instructions
+ */
+
+#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
+#define MCOUNT_OFFSET_INSNS 5
+#else
+#define MCOUNT_OFFSET_INSNS 4
+#endif
+#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
+
 int ftrace_make_nop(struct module *mod,
                     struct dyn_ftrace *rec, unsigned long addr)
 {
···
         unsigned long ip = rec->ip;
 
         /*
-         * We have compiled module with -mlong-calls, but compiled the kernel
-         * without it, we need to cope with them respectively.
+         * If ip is in kernel space, no long call, otherwise, long call is
+         * needed.
          */
-        if (in_module(ip)) {
-#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
-                /*
-                 * lui v1, hi_16bit_of_mcount   --> b 1f (0x10000005)
-                 * addiu v1, v1, low_16bit_of_mcount
-                 * move at, ra
-                 * move $12, ra_address
-                 * jalr v1
-                 *  sub sp, sp, 8
-                 *                              1: offset = 5 instructions
-                 */
-                new = INSN_B_1F_5;
-#else
-                /*
-                 * lui v1, hi_16bit_of_mcount   --> b 1f (0x10000004)
-                 * addiu v1, v1, low_16bit_of_mcount
-                 * move at, ra
-                 * jalr v1
-                 *  nop | move $12, ra_address | sub sp, sp, 8
-                 *                              1: offset = 4 instructions
-                 */
-                new = INSN_B_1F_4;
-#endif
-        } else {
-                /*
-                 * move at, ra
-                 * jal _mcount --> nop
-                 */
-                new = INSN_NOP;
-        }
+        new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
+
         return ftrace_modify_code(ip, new);
 }
···
         unsigned int new;
         unsigned long ip = rec->ip;
 
-        /* ip, module: 0xc0000000, kernel: 0x80000000 */
-        new = in_module(ip) ? insn_lui_v1_hi16_mcount : insn_jal_ftrace_caller;
+        new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
+                insn_lui_v1_hi16_mcount;
 
         return ftrace_modify_code(ip, new);
 }
···
 #define S_R_SP  (0xafb0 << 16)  /* s{d,w} R, offset(sp) */
 #define OFFSET_MASK     0xffff  /* stack offset range: 0 ~ PT_SIZE */
 
-unsigned long ftrace_get_parent_addr(unsigned long self_addr,
-                                     unsigned long parent,
-                                     unsigned long parent_addr,
-                                     unsigned long fp)
+unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
+                old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
 {
-        unsigned long sp, ip, ra;
+        unsigned long sp, ip, tmp;
         unsigned int code;
         int faulted;
 
         /*
-         * For module, move the ip from calling site of mcount to the
-         * instruction "lui v1, hi_16bit_of_mcount"(offset is 20), but for
-         * kernel, move to the instruction "move ra, at"(offset is 12)
+         * For module, move the ip from the return address after the
+         * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for
+         * kernel, move after the instruction "move ra, at"(offset is 16)
          */
-        ip = self_addr - (in_module(self_addr) ? 20 : 12);
+        ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);
 
         /*
          * search the text until finding the non-store instruction or "s{d,w}
          * ra, offset(sp)" instruction
          */
         do {
-                ip -= 4;
-
                 /* get the code at "ip": code = *(unsigned int *)ip; */
                 safe_load_code(code, ip, faulted);
 
···
                  * store the ra on the stack
                  */
                 if ((code & S_R_SP) != S_R_SP)
-                        return parent_addr;
+                        return parent_ra_addr;
 
-        } while (((code & S_RA_SP) != S_RA_SP));
+                /* Move to the next instruction */
+                ip -= 4;
+        } while ((code & S_RA_SP) != S_RA_SP);
 
         sp = fp + (code & OFFSET_MASK);
 
-        /* ra = *(unsigned long *)sp; */
-        safe_load_stack(ra, sp, faulted);
+        /* tmp = *(unsigned long *)sp; */
+        safe_load_stack(tmp, sp, faulted);
         if (unlikely(faulted))
                 return 0;
 
-        if (ra == parent)
+        if (tmp == old_parent_ra)
                 return sp;
         return 0;
 }
···
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
  */
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
                            unsigned long fp)
 {
-        unsigned long old;
+        unsigned long old_parent_ra;
         struct ftrace_graph_ent trace;
         unsigned long return_hooker = (unsigned long)
             &return_to_handler;
-        int faulted;
+        int faulted, insns;
 
         if (unlikely(atomic_read(&current->tracing_graph_pause)))
                 return;
 
         /*
-         * "parent" is the stack address saved the return address of the caller
-         * of _mcount.
+         * "parent_ra_addr" is the stack address saved the return address of
+         * the caller of _mcount.
          *
          * if the gcc < 4.5, a leaf function does not save the return address
          * in the stack address, so, we "emulate" one in _mcount's stack space,
···
          * do it in ftrace_graph_caller of mcount.S.
          */
 
-        /* old = *parent; */
-        safe_load_stack(old, parent, faulted);
+        /* old_parent_ra = *parent_ra_addr; */
+        safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
         if (unlikely(faulted))
                 goto out;
 #ifndef KBUILD_MCOUNT_RA_ADDRESS
-        parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old,
-                                                         (unsigned long)parent, fp);
+        parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
+                        old_parent_ra, (unsigned long)parent_ra_addr, fp);
         /*
          * If fails when getting the stack address of the non-leaf function's
          * ra, stop function graph tracer and return
          */
-        if (parent == 0)
+        if (parent_ra_addr == 0)
                 goto out;
 #endif
-        /* *parent = return_hooker; */
-        safe_store_stack(return_hooker, parent, faulted);
+        /* *parent_ra_addr = return_hooker; */
+        safe_store_stack(return_hooker, parent_ra_addr, faulted);
         if (unlikely(faulted))
                 goto out;
 
-        if (ftrace_push_return_trace(old, self_addr, &trace.depth, fp) ==
-            -EBUSY) {
-                *parent = old;
+        if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
+            == -EBUSY) {
+                *parent_ra_addr = old_parent_ra;
                 return;
         }
 
-        trace.func = self_addr;
+        /*
+         * Get the recorded ip of the current mcount calling site in the
+         * __mcount_loc section, which will be used to filter the function
+         * entries configured through the tracing/set_graph_function interface.
+         */
+
+        insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
+        trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
 
         /* Only trace if the calling function expects to */
         if (!ftrace_graph_entry(&trace)) {
                 current->curr_ret_stack--;
-                *parent_ra_addr = old_parent_ra;
         }
         return;
 out:
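A note on the INSN_B_1F encoding above: the MIPS "b" instruction is really "beq $zero, $zero, offset", whose opcode field (bits 31:26) is 0x04, giving the fixed pattern 0x10000000; the low 16 bits hold the signed branch offset in instructions, counted from the delay slot. A hedged, compile-anywhere sketch (the helper name is illustrative, not from the patch):

#include <stdint.h>

/* "b off" == "beq $zero, $zero, off": 0x04 << 26 == 0x10000000, and the
 * low 16 bits are the instruction offset relative to the delay slot. */
static uint32_t mips_insn_b(int16_t insn_offset)
{
        return 0x10000000u | (uint16_t)insn_offset;
}

/* mips_insn_b(4) == 0x10000004 and mips_insn_b(5) == 0x10000005, matching
 * INSN_B_1F for the 4- and 5-instruction module stubs described above. */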
+170 -183
arch/mips/kernel/perf_event.c
···
         return ret;
 }
 
-static int mipspmu_enable(struct perf_event *event)
-{
-        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-        struct hw_perf_event *hwc = &event->hw;
-        int idx;
-        int err = 0;
-
-        /* To look for a free counter for this event. */
-        idx = mipspmu->alloc_counter(cpuc, hwc);
-        if (idx < 0) {
-                err = idx;
-                goto out;
-        }
-
-        /*
-         * If there is an event in the counter we are going to use then
-         * make sure it is disabled.
-         */
-        event->hw.idx = idx;
-        mipspmu->disable_event(idx);
-        cpuc->events[idx] = event;
-
-        /* Set the period for the event. */
-        mipspmu_event_set_period(event, hwc, idx);
-
-        /* Enable the event. */
-        mipspmu->enable_event(hwc, idx);
-
-        /* Propagate our changes to the userspace mapping. */
-        perf_event_update_userpage(event);
-
-out:
-        return err;
-}
-
 static void mipspmu_event_update(struct perf_event *event,
                                  struct hw_perf_event *hwc,
                                  int idx)
···
         unsigned long flags;
         int shift = 64 - TOTAL_BITS;
         s64 prev_raw_count, new_raw_count;
-        s64 delta;
+        u64 delta;
 
 again:
         prev_raw_count = local64_read(&hwc->prev_count);
···
         return;
 }
 
-static void mipspmu_disable(struct perf_event *event)
+static void mipspmu_start(struct perf_event *event, int flags)
+{
+        struct hw_perf_event *hwc = &event->hw;
+
+        if (!mipspmu)
+                return;
+
+        if (flags & PERF_EF_RELOAD)
+                WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+        hwc->state = 0;
+
+        /* Set the period for the event. */
+        mipspmu_event_set_period(event, hwc, hwc->idx);
+
+        /* Enable the event. */
+        mipspmu->enable_event(hwc, hwc->idx);
+}
+
+static void mipspmu_stop(struct perf_event *event, int flags)
+{
+        struct hw_perf_event *hwc = &event->hw;
+
+        if (!mipspmu)
+                return;
+
+        if (!(hwc->state & PERF_HES_STOPPED)) {
+                /* We are working on a local event. */
+                mipspmu->disable_event(hwc->idx);
+                barrier();
+                mipspmu_event_update(event, hwc, hwc->idx);
+                hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+        }
+}
+
+static int mipspmu_add(struct perf_event *event, int flags)
+{
+        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+        struct hw_perf_event *hwc = &event->hw;
+        int idx;
+        int err = 0;
+
+        perf_pmu_disable(event->pmu);
+
+        /* To look for a free counter for this event. */
+        idx = mipspmu->alloc_counter(cpuc, hwc);
+        if (idx < 0) {
+                err = idx;
+                goto out;
+        }
+
+        /*
+         * If there is an event in the counter we are going to use then
+         * make sure it is disabled.
+         */
+        event->hw.idx = idx;
+        mipspmu->disable_event(idx);
+        cpuc->events[idx] = event;
+
+        hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+        if (flags & PERF_EF_START)
+                mipspmu_start(event, PERF_EF_RELOAD);
+
+        /* Propagate our changes to the userspace mapping. */
+        perf_event_update_userpage(event);
+
+out:
+        perf_pmu_enable(event->pmu);
+        return err;
+}
+
+static void mipspmu_del(struct perf_event *event, int flags)
 {
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
         struct hw_perf_event *hwc = &event->hw;
         int idx = hwc->idx;
 
-
         WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
 
-        /* We are working on a local event. */
-        mipspmu->disable_event(idx);
-
-        barrier();
-
-        mipspmu_event_update(event, hwc, idx);
+        mipspmu_stop(event, PERF_EF_UPDATE);
         cpuc->events[idx] = NULL;
         clear_bit(idx, cpuc->used_mask);
 
         perf_event_update_userpage(event);
-}
-
-static void mipspmu_unthrottle(struct perf_event *event)
-{
-        struct hw_perf_event *hwc = &event->hw;
-
-        mipspmu->enable_event(hwc, hwc->idx);
 }
 
 static void mipspmu_read(struct perf_event *event)
···
         mipspmu_event_update(event, hwc, hwc->idx);
 }
 
-static struct pmu pmu = {
-        .enable         = mipspmu_enable,
-        .disable        = mipspmu_disable,
-        .unthrottle     = mipspmu_unthrottle,
-        .read           = mipspmu_read,
-};
+static void mipspmu_enable(struct pmu *pmu)
+{
+        if (mipspmu)
+                mipspmu->start();
+}
+
+static void mipspmu_disable(struct pmu *pmu)
+{
+        if (mipspmu)
+                mipspmu->stop();
+}
 
 static atomic_t active_events = ATOMIC_INIT(0);
 static DEFINE_MUTEX(pmu_reserve_mutex);
···
         else if (cp0_perfcount_irq < 0)
                 perf_irq = save_perf_irq;
 }
+
+/*
+ * mipsxx/rm9000/loongson2 have different performance counters, they have
+ * specific low-level init routines.
+ */
+static void reset_counters(void *arg);
+static int __hw_perf_event_init(struct perf_event *event);
+
+static void hw_perf_event_destroy(struct perf_event *event)
+{
+        if (atomic_dec_and_mutex_lock(&active_events,
+                                &pmu_reserve_mutex)) {
+                /*
+                 * We must not call the destroy function with interrupts
+                 * disabled.
+                 */
+                on_each_cpu(reset_counters,
+                        (void *)(long)mipspmu->num_counters, 1);
+                mipspmu_free_irq();
+                mutex_unlock(&pmu_reserve_mutex);
+        }
+}
+
+static int mipspmu_event_init(struct perf_event *event)
+{
+        int err = 0;
+
+        switch (event->attr.type) {
+        case PERF_TYPE_RAW:
+        case PERF_TYPE_HARDWARE:
+        case PERF_TYPE_HW_CACHE:
+                break;
+
+        default:
+                return -ENOENT;
+        }
+
+        if (!mipspmu || event->cpu >= nr_cpumask_bits ||
+                (event->cpu >= 0 && !cpu_online(event->cpu)))
+                return -ENODEV;
+
+        if (!atomic_inc_not_zero(&active_events)) {
+                if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
+                        atomic_dec(&active_events);
+                        return -ENOSPC;
+                }
+
+                mutex_lock(&pmu_reserve_mutex);
+                if (atomic_read(&active_events) == 0)
+                        err = mipspmu_get_irq();
+
+                if (!err)
+                        atomic_inc(&active_events);
+                mutex_unlock(&pmu_reserve_mutex);
+        }
+
+        if (err)
+                return err;
+
+        err = __hw_perf_event_init(event);
+        if (err)
+                hw_perf_event_destroy(event);
+
+        return err;
+}
+
+static struct pmu pmu = {
+        .pmu_enable     = mipspmu_enable,
+        .pmu_disable    = mipspmu_disable,
+        .event_init     = mipspmu_event_init,
+        .add            = mipspmu_add,
+        .del            = mipspmu_del,
+        .start          = mipspmu_start,
+        .stop           = mipspmu_stop,
+        .read           = mipspmu_read,
+};
 
 static inline unsigned int
 mipspmu_perf_event_encode(const struct mips_perf_event *pev)
···
 {
         struct hw_perf_event fake_hwc = event->hw;
 
-        if (event->pmu && event->pmu != &pmu)
-                return 0;
+        /* Allow mixed event group. So return 1 to pass validation. */
+        if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
+                return 1;
 
         return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0;
 }
···
         return 0;
 }
 
-/*
- * mipsxx/rm9000/loongson2 have different performance counters, they have
- * specific low-level init routines.
- */
-static void reset_counters(void *arg);
-static int __hw_perf_event_init(struct perf_event *event);
-
-static void hw_perf_event_destroy(struct perf_event *event)
-{
-        if (atomic_dec_and_mutex_lock(&active_events,
-                                &pmu_reserve_mutex)) {
-                /*
-                 * We must not call the destroy function with interrupts
-                 * disabled.
-                 */
-                on_each_cpu(reset_counters,
-                        (void *)(long)mipspmu->num_counters, 1);
-                mipspmu_free_irq();
-                mutex_unlock(&pmu_reserve_mutex);
-        }
-}
-
-const struct pmu *hw_perf_event_init(struct perf_event *event)
-{
-        int err = 0;
-
-        if (!mipspmu || event->cpu >= nr_cpumask_bits ||
-                (event->cpu >= 0 && !cpu_online(event->cpu)))
-                return ERR_PTR(-ENODEV);
-
-        if (!atomic_inc_not_zero(&active_events)) {
-                if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
-                        atomic_dec(&active_events);
-                        return ERR_PTR(-ENOSPC);
-                }
-
-                mutex_lock(&pmu_reserve_mutex);
-                if (atomic_read(&active_events) == 0)
-                        err = mipspmu_get_irq();
-
-                if (!err)
-                        atomic_inc(&active_events);
-                mutex_unlock(&pmu_reserve_mutex);
-        }
-
-        if (err)
-                return ERR_PTR(err);
-
-        err = __hw_perf_event_init(event);
-        if (err)
-                hw_perf_event_destroy(event);
-
-        return err ? ERR_PTR(err) : &pmu;
-}
-
-void hw_perf_enable(void)
-{
-        if (mipspmu)
-                mipspmu->start();
-}
-
-void hw_perf_disable(void)
-{
-        if (mipspmu)
-                mipspmu->stop();
-}
-
 /* This is needed by specific irq handlers in perf_event_*.c */
 static void
 handle_associated_event(struct cpu_hw_events *cpuc,
···
 #include "perf_event_mipsxx.c"
 
 /* Callchain handling code. */
-static inline void
-callchain_store(struct perf_callchain_entry *entry,
-        u64 ip)
-{
-        if (entry->nr < PERF_MAX_STACK_DEPTH)
-                entry->ip[entry->nr++] = ip;
-}
 
 /*
  * Leave userspace callchain empty for now. When we find a way to trace
  * the user stack callchains, we add here.
  */
-static void
-perf_callchain_user(struct pt_regs *regs,
-                    struct perf_callchain_entry *entry)
+void perf_callchain_user(struct perf_callchain_entry *entry,
+                    struct pt_regs *regs)
 {
 }
···
         while (!kstack_end(sp)) {
                 addr = *sp++;
                 if (__kernel_text_address(addr)) {
-                        callchain_store(entry, addr);
+                        perf_callchain_store(entry, addr);
                         if (entry->nr >= PERF_MAX_STACK_DEPTH)
                                 break;
                 }
         }
 }
 
-static void
-perf_callchain_kernel(struct pt_regs *regs,
-                      struct perf_callchain_entry *entry)
+void perf_callchain_kernel(struct perf_callchain_entry *entry,
+                      struct pt_regs *regs)
 {
         unsigned long sp = regs->regs[29];
 #ifdef CONFIG_KALLSYMS
         unsigned long ra = regs->regs[31];
         unsigned long pc = regs->cp0_epc;
 
-        callchain_store(entry, PERF_CONTEXT_KERNEL);
         if (raw_show_trace || !__kernel_text_address(pc)) {
                 unsigned long stack_page =
                         (unsigned long)task_stack_page(current);
···
                 return;
         }
         do {
-                callchain_store(entry, pc);
+                perf_callchain_store(entry, pc);
                 if (entry->nr >= PERF_MAX_STACK_DEPTH)
                         break;
                 pc = unwind_stack(current, &sp, pc, &ra);
         } while (pc);
 #else
-        callchain_store(entry, PERF_CONTEXT_KERNEL);
         save_raw_perf_callchain(entry, sp);
 #endif
-}
-
-static void
-perf_do_callchain(struct pt_regs *regs,
-                  struct perf_callchain_entry *entry)
-{
-        int is_user;
-
-        if (!regs)
-                return;
-
-        is_user = user_mode(regs);
-
-        if (!current || !current->pid)
-                return;
-
-        if (is_user && current->state != TASK_RUNNING)
-                return;
-
-        if (!is_user) {
-                perf_callchain_kernel(regs, entry);
-                if (current->mm)
-                        regs = task_pt_regs(current);
-                else
-                        regs = NULL;
-        }
-        if (regs)
-                perf_callchain_user(regs, entry);
-}
-
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
-
-struct perf_callchain_entry *
-perf_callchain(struct pt_regs *regs)
-{
-        struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
-
-        entry->nr = 0;
-        perf_do_callchain(regs, entry);
-        return entry;
 }
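The event_init/add/del/start/stop callbacks introduced above are driven by the core perf code when user space opens and schedules an event. A minimal userspace sketch (not part of the patch) that would roughly exercise this path through the perf_event_open(2) syscall, counting CPU cycles for the current task:

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <stdint.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                            int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;
        uint64_t count;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_HARDWARE;         /* accepted by mipspmu_event_init() */
        attr.size = sizeof(attr);
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.disabled = 1;

        fd = perf_event_open(&attr, 0, -1, -1, 0);      /* current task, any CPU */
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);    /* event gets add()ed/start()ed */
        /* ... workload to measure ... */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);   /* event gets stop()ped */
        if (read(fd, &count, sizeof(count)) == sizeof(count))
                printf("cycles: %llu\n", (unsigned long long)count);
        close(fd);
        return 0;
}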
+3 -1
arch/mips/kernel/perf_event_mipsxx.c
···
          * interrupt, not NMI.
          */
         if (handled == IRQ_HANDLED)
-                perf_event_do_pending();
+                irq_work_run();
 
 #ifdef CONFIG_MIPS_MT_SMP
         read_unlock(&pmuint_rwlock);
···
         pr_cont("%s PMU enabled, %d counters available to each "
                 "CPU, irq %d%s\n", mipspmu->name, counters, irq,
                 irq < 0 ? " (share with timer interrupt)" : "");
+
+        perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
 
         return 0;
 }
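perf_event_do_pending() was replaced tree-wide by the generic irq_work mechanism in this cycle; because MIPS counters raise a normal interrupt rather than an NMI, the overflow handler can simply drain the queue at its tail, which is what the irq_work_run() call above does. A minimal sketch of the irq_work API as it looks in later kernels (the struct of this era was initialized by hand, so treat the init helper as an assumption for this tree):

#include <linux/irq_work.h>

static void my_deferred_fn(struct irq_work *work)
{
        /* Runs in hard-irq context shortly after being queued. */
}

static struct irq_work my_work;

static void my_setup(void)
{
        init_irq_work(&my_work, my_deferred_fn);
}

static void my_counter_overflow(void)
{
        irq_work_queue(&my_work);       /* safe even from NMI context */
}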
+1 -1
arch/mips/kernel/signal.c
···
 
 static int protected_restore_fp_context(struct sigcontext __user *sc)
 {
-        int err, tmp;
+        int err, tmp __maybe_unused;
         while (1) {
                 lock_fpu_owner();
                 own_fpu_inatomic(0);
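The same __maybe_unused annotation recurs in signal32.c, ieee754int.h and init.c below: each variable is written through a macro or ifdef path but never read on some configurations, which GCC 4.6's new -Wunused-but-set-variable warning flags. The kernel's compiler headers define the annotation as GCC's unused attribute; a standalone illustration:

/* Mirrors the kernel's definition in its compiler headers. */
#define __maybe_unused __attribute__((unused))

int demo(void)
{
        int used = 1;
        int scratch __maybe_unused;     /* set but never read below */

        scratch = used + 1;     /* would trigger -Wunused-but-set-variable
                                   on GCC >= 4.6 without the annotation */
        return used;
}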
+1 -1
arch/mips/kernel/signal32.c
···
 
 static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
 {
-        int err, tmp;
+        int err, tmp __maybe_unused;
         while (1) {
                 lock_fpu_owner();
                 own_fpu_inatomic(0);
+29 -2
arch/mips/kernel/smp.c
···
  */
 static struct task_struct *cpu_idle_thread[NR_CPUS];
 
+struct create_idle {
+        struct work_struct work;
+        struct task_struct *idle;
+        struct completion done;
+        int cpu;
+};
+
+static void __cpuinit do_fork_idle(struct work_struct *work)
+{
+        struct create_idle *c_idle =
+                container_of(work, struct create_idle, work);
+
+        c_idle->idle = fork_idle(c_idle->cpu);
+        complete(&c_idle->done);
+}
+
 int __cpuinit __cpu_up(unsigned int cpu)
 {
         struct task_struct *idle;
···
          * Linux can schedule processes on this slave.
          */
         if (!cpu_idle_thread[cpu]) {
-                idle = fork_idle(cpu);
-                cpu_idle_thread[cpu] = idle;
+                /*
+                 * Schedule work item to avoid forking user task
+                 * Ported from arch/x86/kernel/smpboot.c
+                 */
+                struct create_idle c_idle = {
+                        .cpu    = cpu,
+                        .done   = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
+                };
+
+                INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
+                schedule_work(&c_idle.work);
+                wait_for_completion(&c_idle.done);
+                idle = cpu_idle_thread[cpu] = c_idle.idle;
 
                 if (IS_ERR(idle))
                         panic(KERN_ERR "Fork failed for CPU %d", cpu);
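The on-stack work item plus completion used above is the standard recipe for running a call synchronously in a clean kworker context; as the comment notes, it is ported from arch/x86/kernel/smpboot.c so that fork_idle() is not invoked from a user task's context. A generic sketch of the pattern (names are illustrative):

#include <linux/workqueue.h>
#include <linux/completion.h>

struct sync_work {
        struct work_struct work;
        struct completion done;
        int result;
};

static void sync_work_fn(struct work_struct *work)
{
        struct sync_work *sw = container_of(work, struct sync_work, work);

        sw->result = 0;         /* do the context-sensitive job here */
        complete(&sw->done);
}

static int run_in_kworker(void)
{
        struct sync_work sw = {
                .done = COMPLETION_INITIALIZER_ONSTACK(sw.done),
        };

        INIT_WORK_ONSTACK(&sw.work, sync_work_fn);
        schedule_work(&sw.work);
        wait_for_completion(&sw.done);  /* block until the kworker is done */
        return sw.result;
}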
+2 -3
arch/mips/kernel/syscall.c
···
 static int __used noinline
 _sys_sysmips(nabi_no_regargs struct pt_regs regs)
 {
-        long cmd, arg1, arg2, arg3;
+        long cmd, arg1, arg2;
 
         cmd = regs.regs[4];
         arg1 = regs.regs[5];
         arg2 = regs.regs[6];
-        arg3 = regs.regs[7];
 
         switch (cmd) {
         case MIPS_ATOMIC_SET:
···
                 if (arg1 & 2)
                         set_thread_flag(TIF_LOGADE);
                 else
-                        clear_thread_flag(TIF_FIXADE);
+                        clear_thread_flag(TIF_LOGADE);
 
                 return 0;
 
+2 -2
arch/mips/kernel/vpe.c
···
         spinlock_t tc_list_lock;
         struct list_head tc_list;       /* Thread contexts */
 } vpecontrol = {
-        .vpe_list_lock  = SPIN_LOCK_UNLOCKED,
+        .vpe_list_lock  = __SPIN_LOCK_UNLOCKED(vpe_list_lock),
         .vpe_list       = LIST_HEAD_INIT(vpecontrol.vpe_list),
-        .tc_list_lock   = SPIN_LOCK_UNLOCKED,
+        .tc_list_lock   = __SPIN_LOCK_UNLOCKED(tc_list_lock),
         .tc_list        = LIST_HEAD_INIT(vpecontrol.tc_list)
 };
 
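SPIN_LOCK_UNLOCKED was deprecated and then removed because a shared static initializer cannot give each lock the per-lock class key that lockdep needs. The two surviving idioms, sketched briefly:

#include <linux/spinlock.h>

/* Standalone lock: */
static DEFINE_SPINLOCK(standalone_lock);

/* Lock embedded in a statically initialized struct, as in vpecontrol: */
struct ctrl {
        spinlock_t lock;
};

static struct ctrl ctrl = {
        .lock = __SPIN_LOCK_UNLOCKED(ctrl.lock),
};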
+4 -1
arch/mips/loongson/Kconfig
···
+if MACH_LOONGSON
+
 choice
         prompt "Machine Type"
-        depends on MACH_LOONGSON
 
 config LEMOTE_FULOONG2E
         bool "Lemote Fuloong(2e) mini-PC"
···
 config LOONGSON_MC146818
         bool
         default n
+
+endif # MACH_LOONGSON
-5
arch/mips/loongson/common/cmdline.c
···
                 strcat(arcs_cmdline, " ");
         }
 
-        if ((strstr(arcs_cmdline, "console=")) == NULL)
-                strcat(arcs_cmdline, " console=ttyS0,115200");
-        if ((strstr(arcs_cmdline, "root=")) == NULL)
-                strcat(arcs_cmdline, " root=/dev/hda1");
-
         prom_init_machtype();
 }
+2 -1
arch/mips/loongson/common/machtype.c
···
 
 void __init prom_init_machtype(void)
 {
-        char *p, str[MACHTYPE_LEN];
+        char *p, str[MACHTYPE_LEN + 1];
         int machtype = MACH_LEMOTE_FL2E;
 
         mips_machtype = LOONGSON_MACHTYPE;
···
         }
         p += strlen("machtype=");
         strncpy(str, p, MACHTYPE_LEN);
+        str[MACHTYPE_LEN] = '\0';
         p = strstr(str, " ");
         if (p)
                 *p = '\0';
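The machtype change fixes the classic strncpy() pitfall: when the source is at least as long as the size argument, the destination is left without a NUL terminator, so the following strstr() could read past the buffer. A userspace illustration of the pattern the patch adopts:

#include <stdio.h>
#include <string.h>

#define LEN 4

int main(void)
{
        char buf[LEN + 1];              /* one spare byte for the terminator */

        strncpy(buf, "longer-than-four", LEN);
        buf[LEN] = '\0';                /* strncpy() did not terminate it */
        printf("%s\n", buf);            /* prints "long" */
        return 0;
}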
+2 -2
arch/mips/math-emu/ieee754int.h
···
 
 
 #define COMPXSP \
-        unsigned xm; int xe; int xs; int xc
+        unsigned xm; int xe; int xs __maybe_unused; int xc
 
 #define COMPYSP \
         unsigned ym; int ye; int ys; int yc
···
 
 
 #define COMPXDP \
-        u64 xm; int xe; int xs; int xc
+        u64 xm; int xe; int xs __maybe_unused; int xc
 
 #define COMPYDP \
         u64 ym; int ye; int ys; int yc
+1 -1
arch/mips/mm/init.c
···
 void __init paging_init(void)
 {
         unsigned long max_zone_pfns[MAX_NR_ZONES];
-        unsigned long lastpfn;
+        unsigned long lastpfn __maybe_unused;
 
         pagetable_init();
 
+2
arch/mips/mm/tlbex.c
···
 static int scratchpad_offset(int i)
 {
         BUG();
+        /* Really unreachable, but evidently some GCC want this. */
+        return 0;
 }
 #endif
 /*
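BUG() never returns, yet some GCC releases still warn when a non-void function can fall off the end, hence the dummy return. The kernel also carries an unreachable() macro, expanding to __builtin_unreachable() on GCC >= 4.5 with a portable fallback; whether it would have silenced the affected compilers here is an assumption, which is presumably why the patch keeps the plain return. A hedged sketch of the alternative shape:

#include <linux/bug.h>
#include <linux/compiler.h>

/* Hypothetical alternative on compilers with builtin support: */
static int scratchpad_offset_alt(int i)
{
        BUG();
        unreachable();  /* tells the compiler no return value is needed */
}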
+2 -2
arch/mips/pci/ops-pmcmsp.c
···
  *  RETURNS:     PCIBIOS_SUCCESSFUL  - success
  *
  ****************************************************************************/
-static int bpci_interrupt(int irq, void *dev_id)
+static irqreturn_t bpci_interrupt(int irq, void *dev_id)
 {
         struct msp_pci_regs *preg = (void *)PCI_BASE_REG;
         unsigned int stat = preg->if_status;
···
         /* write to clear all asserted interrupts */
         preg->if_status = stat;
 
-        return PCIBIOS_SUCCESSFUL;
+        return IRQ_HANDLED;
 }
 
 /*****************************************************************************
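Handlers registered through request_irq() must return irqreturn_t rather than a PCI status code: the genirq core uses the value to tell handled interrupts from spurious ones and to detect stuck lines, and PCIBIOS_SUCCESSFUL happens to equal 0, i.e. IRQ_NONE, so every interrupt looked spurious. A minimal sketch of the expected shape (the device struct and pending test are hypothetical):

#include <linux/interrupt.h>
#include <linux/types.h>

struct my_dev {
        int pending;                    /* hypothetical per-device state */
};

static bool my_dev_irq_pending(struct my_dev *dev)
{
        return dev->pending;            /* stand-in for a status-register read */
}

static irqreturn_t my_handler(int irq, void *dev_id)
{
        struct my_dev *dev = dev_id;

        if (!my_dev_irq_pending(dev))
                return IRQ_NONE;        /* not ours; lets genirq catch bad lines */

        /* acknowledge/clear the interrupt source in the hardware here */
        return IRQ_HANDLED;
}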
-4
arch/mips/pmc-sierra/Kconfig
···
 
 config PMC_MSP4200_EVAL
         bool "PMC-Sierra MSP4200 Eval Board"
-        select CEVT_R4K
-        select CSRC_R4K
         select IRQ_MSP_SLP
         select HW_HAS_PCI
 
 config PMC_MSP4200_GW
         bool "PMC-Sierra MSP4200 VoIP Gateway"
-        select CEVT_R4K
-        select CSRC_R4K
         select IRQ_MSP_SLP
         select HW_HAS_PCI
 
+1 -1
arch/mips/pmc-sierra/msp71xx/msp_time.c
···
         mips_hpt_frequency = cpu_rate/2;
 }
 
-unsigned int __init get_c0_compare_int(void)
+unsigned int __cpuinit get_c0_compare_int(void)
 {
         return MSP_INT_VPE0_TIMER;
 }