Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git


Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
[IA64] fix ia64 kprobes compilation
[IA64] move gcc_intrin.h from header-y to unifdef-y
[IA64] workaround tiger ia64_sal_get_physical_id_info hang
[IA64] move defconfig to arch/ia64/configs/
[IA64] Fix irq migration in multiple vector domain
[IA64] signal(ia64_ia32): add a signal stack overflow check
[IA64] signal(ia64): add a signal stack overflow check
[IA64] CONFIG_SGI_SN2 - auto select NUMA and ACPI_NUMA

14 files changed  +180 -48
arch/ia64/Kconfig  +2

···
 
 config IA64_SGI_SN2
         bool "SGI-SN2"
+        select NUMA
+        select ACPI_NUMA
         help
           Selecting this option will optimize the kernel for use on sn2 based
           systems, but the resulting kernel binary will not run on other
arch/ia64/Makefile  +2

···
 # Copyright (C) 1998-2004 by David Mosberger-Tang <davidm@hpl.hp.com>
 #
 
+KBUILD_DEFCONFIG := generic_defconfig
+
 NM := $(CROSS_COMPILE)nm -B
 READELF := $(CROSS_COMPILE)readelf
 
arch/ia64/defconfig → arch/ia64/configs/generic_defconfig  (renamed)
arch/ia64/ia32/ia32_signal.c  +12 -1

···
 
         /* This is the X/Open sanctioned signal stack switching.  */
         if (ka->sa.sa_flags & SA_ONSTACK) {
-                if (!on_sig_stack(esp))
+                int onstack = sas_ss_flags(esp);
+
+                if (onstack == 0)
                         esp = current->sas_ss_sp + current->sas_ss_size;
+                else if (onstack == SS_ONSTACK) {
+                        /*
+                         * If we are on the alternate signal stack and would
+                         * overflow it, don't. Return an always-bogus address
+                         * instead so we will die with SIGSEGV.
+                         */
+                        if (!likely(on_sig_stack(esp - frame_size)))
+                                return (void __user *) -1L;
+                }
         }
         /* Legacy stack switching not supported */
 
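The case this hunk guards against (and that the matching native-signal hunk in arch/ia64/kernel/signal.c below handles the same way) is easiest to see from user space: a handler that keeps re-raising its own signal on an alternate stack will eventually run that stack out. A minimal demo, not part of this commit, with arbitrary names and sizes:

/* altstack_overflow.c - hypothetical demo of the overflow this patch catches. */
#include <signal.h>

static char altstack[256 * 1024];       /* modest alternate stack, quickly exhausted below */

static void handler(int sig)
{
        /* SA_NODEFER keeps the signal unblocked, so each re-raise pushes
         * one more signal frame onto the alternate stack. */
        raise(sig);
}

int main(void)
{
        stack_t ss = {
                .ss_sp    = altstack,
                .ss_size  = sizeof(altstack),
                .ss_flags = 0,
        };
        struct sigaction sa = {
                .sa_handler = handler,
                .sa_flags   = SA_ONSTACK | SA_NODEFER,
        };

        sigemptyset(&sa.sa_mask);
        sigaltstack(&ss, NULL);
        sigaction(SIGUSR1, &sa, NULL);
        raise(SIGUSR1);
        return 0;
}

Before this change the kernel kept building frames past the end of the alternate stack, silently scribbling over whatever lies below it; with the sas_ss_flags()/on_sig_stack() check it refuses to build the overflowing frame and the process dies with SIGSEGV instead.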
arch/ia64/kernel/iosapic.c  +3 -1

···
         if (cpus_empty(mask))
                 return;
 
-        if (reassign_irq_vector(irq, first_cpu(mask)))
+        if (irq_prepare_move(irq, first_cpu(mask)))
                 return;
 
         dest = cpu_physical_id(first_cpu(mask));
···
         struct iosapic_rte_info *rte;
         int do_unmask_irq = 0;
 
+        irq_complete_move(irq);
         if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
                 do_unmask_irq = 1;
                 mask_irq(irq);
···
 {
         irq_desc_t *idesc = irq_desc + irq;
 
+        irq_complete_move(irq);
         move_native_irq(irq);
         /*
          * Once we have recorded IRQ_PENDING already, we can mask the
arch/ia64/kernel/irq_ia64.c  +104 -30

···
 }
 
 #if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
+#define IA64_IRQ_MOVE_VECTOR IA64_DEF_FIRST_DEVICE_VECTOR
+
 static enum vector_domain_type {
         VECTOR_DOMAIN_NONE,
         VECTOR_DOMAIN_PERCPU
···
                 return cpumask_of_cpu(cpu);
         return CPU_MASK_ALL;
 }
+
+static int __irq_prepare_move(int irq, int cpu)
+{
+        struct irq_cfg *cfg = &irq_cfg[irq];
+        int vector;
+        cpumask_t domain;
+
+        if (cfg->move_in_progress || cfg->move_cleanup_count)
+                return -EBUSY;
+        if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
+                return -EINVAL;
+        if (cpu_isset(cpu, cfg->domain))
+                return 0;
+        domain = vector_allocation_domain(cpu);
+        vector = find_unassigned_vector(domain);
+        if (vector < 0)
+                return -ENOSPC;
+        cfg->move_in_progress = 1;
+        cfg->old_domain = cfg->domain;
+        cfg->vector = IRQ_VECTOR_UNASSIGNED;
+        cfg->domain = CPU_MASK_NONE;
+        BUG_ON(__bind_irq_vector(irq, vector, domain));
+        return 0;
+}
+
+int irq_prepare_move(int irq, int cpu)
+{
+        unsigned long flags;
+        int ret;
+
+        spin_lock_irqsave(&vector_lock, flags);
+        ret = __irq_prepare_move(irq, cpu);
+        spin_unlock_irqrestore(&vector_lock, flags);
+        return ret;
+}
+
+void irq_complete_move(unsigned irq)
+{
+        struct irq_cfg *cfg = &irq_cfg[irq];
+        cpumask_t cleanup_mask;
+        int i;
+
+        if (likely(!cfg->move_in_progress))
+                return;
+
+        if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
+                return;
+
+        cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
+        cfg->move_cleanup_count = cpus_weight(cleanup_mask);
+        for_each_cpu_mask(i, cleanup_mask)
+                platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
+        cfg->move_in_progress = 0;
+}
+
+static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
+{
+        int me = smp_processor_id();
+        ia64_vector vector;
+        unsigned long flags;
+
+        for (vector = IA64_FIRST_DEVICE_VECTOR;
+             vector < IA64_LAST_DEVICE_VECTOR; vector++) {
+                int irq;
+                struct irq_desc *desc;
+                struct irq_cfg *cfg;
+                irq = __get_cpu_var(vector_irq)[vector];
+                if (irq < 0)
+                        continue;
+
+                desc = irq_desc + irq;
+                cfg = irq_cfg + irq;
+                spin_lock(&desc->lock);
+                if (!cfg->move_cleanup_count)
+                        goto unlock;
+
+                if (!cpu_isset(me, cfg->old_domain))
+                        goto unlock;
+
+                spin_lock_irqsave(&vector_lock, flags);
+                __get_cpu_var(vector_irq)[vector] = -1;
+                cpu_clear(me, vector_table[vector]);
+                spin_unlock_irqrestore(&vector_lock, flags);
+                cfg->move_cleanup_count--;
+unlock:
+                spin_unlock(&desc->lock);
+        }
+        return IRQ_HANDLED;
+}
+
+static struct irqaction irq_move_irqaction = {
+        .handler = smp_irq_move_cleanup_interrupt,
+        .flags = IRQF_DISABLED,
+        .name = "irq_move"
+};
 
 static int __init parse_vector_domain(char *arg)
 {
···
         __clear_irq_vector(irq);
         irq_status[irq] = IRQ_RSVD;
         spin_unlock_irqrestore(&vector_lock, flags);
-}
-
-static int __reassign_irq_vector(int irq, int cpu)
-{
-        struct irq_cfg *cfg = &irq_cfg[irq];
-        int vector;
-        cpumask_t domain;
-
-        if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
-                return -EINVAL;
-        if (cpu_isset(cpu, cfg->domain))
-                return 0;
-        domain = vector_allocation_domain(cpu);
-        vector = find_unassigned_vector(domain);
-        if (vector < 0)
-                return -ENOSPC;
-        __clear_irq_vector(irq);
-        BUG_ON(__bind_irq_vector(irq, vector, domain));
-        return 0;
-}
-
-int reassign_irq_vector(int irq, int cpu)
-{
-        unsigned long flags;
-        int ret;
-
-        spin_lock_irqsave(&vector_lock, flags);
-        ret = __reassign_irq_vector(irq, cpu);
-        spin_unlock_irqrestore(&vector_lock, flags);
-        return ret;
 }
 
 /*
···
         register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
         register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
         register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction);
+#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)
+        if (vector_domain_type != VECTOR_DOMAIN_NONE) {
+                BUG_ON(IA64_FIRST_DEVICE_VECTOR != IA64_IRQ_MOVE_VECTOR);
+                IA64_FIRST_DEVICE_VECTOR++;
+                register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction);
+        }
+#endif
 #endif
 #ifdef CONFIG_PERFMON
         pfm_init_percpu();
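These helpers split vector migration into two halves: irq_prepare_move() is called from a chip's set_affinity path while the old vector is still live, and irq_complete_move() is called from the ack path of the first interrupt taken outside the old domain, where it sends the cleanup IPI so CPUs in the old domain release the stale vector. The iosapic.c hunk above and the msi_ia64.c hunk below are the real call sites; the condensed sketch here is only an illustration, and example_set_affinity()/example_ack() are hypothetical names:

/* Illustrative pairing of the new helpers; not code from this commit. */
static void example_set_affinity(unsigned int irq, cpumask_t mask)
{
        if (cpus_empty(mask))
                return;

        /* Reserve a vector in the destination domain.  Interrupts keep
         * arriving on the old vector until the hardware is reprogrammed. */
        if (irq_prepare_move(irq, first_cpu(mask)))
                return;

        /* ... reprogram the RTE or MSI message for the new CPU/vector ... */
}

static void example_ack(unsigned int irq)
{
        /* First interrupt seen after the move: kick the cleanup IPI so CPUs
         * in the old domain free the stale vector, then ack as usual. */
        irq_complete_move(irq);
        move_native_irq(irq);
        /* ... EOI ... */
}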
arch/ia64/kernel/kprobes.c  +5

···
         return 1;
 }
 
+/* ia64 does not need this */
+void __kprobes jprobe_return(void)
+{
+}
+
 int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
         struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
arch/ia64/kernel/msi_ia64.c  +2 -1

···
         if (!cpu_online(cpu))
                 return;
 
-        if (reassign_irq_vector(irq, cpu))
+        if (irq_prepare_move(irq, cpu))
                 return;
 
         read_msi_msg(irq, &msg);
···
 
 static void ia64_ack_msi_irq(unsigned int irq)
 {
+        irq_complete_move(irq);
         move_native_irq(irq);
         ia64_eoi();
 }
arch/ia64/kernel/sal.c  +7

···
                 sal_revision = SAL_VERSION_CODE(2, 8);
                 sal_version = SAL_VERSION_CODE(0, 0);
         }
+
+        if (ia64_platform_is("sn2") && (sal_revision == SAL_VERSION_CODE(2, 9)))
+                /*
+                 * SGI Altix has hard-coded version 2.9 in their prom
+                 * but they actually implement 3.2, so let's fix it here.
+                 */
+                sal_revision = SAL_VERSION_CODE(3, 2);
 }
 
 static void __init
arch/ia64/kernel/signal.c  +27 -9

···
 
         new_sp = scr->pt.r12;
         tramp_addr = (unsigned long) __kernel_sigtramp;
-        if ((ka->sa.sa_flags & SA_ONSTACK) && sas_ss_flags(new_sp) == 0) {
-                new_sp = current->sas_ss_sp + current->sas_ss_size;
-                /*
-                 * We need to check for the register stack being on the signal stack
-                 * separately, because it's switched separately (memory stack is switched
-                 * in the kernel, register stack is switched in the signal trampoline).
-                 */
-                if (!rbs_on_sig_stack(scr->pt.ar_bspstore))
-                        new_rbs = (current->sas_ss_sp + sizeof(long) - 1) & ~(sizeof(long) - 1);
+        if (ka->sa.sa_flags & SA_ONSTACK) {
+                int onstack = sas_ss_flags(new_sp);
+
+                if (onstack == 0) {
+                        new_sp = current->sas_ss_sp + current->sas_ss_size;
+                        /*
+                         * We need to check for the register stack being on the
+                         * signal stack separately, because it's switched
+                         * separately (memory stack is switched in the kernel,
+                         * register stack is switched in the signal trampoline).
+                         */
+                        if (!rbs_on_sig_stack(scr->pt.ar_bspstore))
+                                new_rbs = ALIGN(current->sas_ss_sp,
+                                                sizeof(long));
+                } else if (onstack == SS_ONSTACK) {
+                        unsigned long check_sp;
+
+                        /*
+                         * If we are on the alternate signal stack and would
+                         * overflow it, don't. Return an always-bogus address
+                         * instead so we will die with SIGSEGV.
+                         */
+                        check_sp = (new_sp - sizeof(*frame)) & -STACK_ALIGN;
+                        if (!likely(on_sig_stack(check_sp)))
+                                return force_sigsegv_info(sig, (void __user *)
+                                                          check_sp);
+                }
         }
         frame = (void __user *) ((new_sp - sizeof(*frame)) & -STACK_ALIGN);
 
include/asm-ia64/Kbuild  +1 -1

···
 header-y += break.h
 header-y += fpu.h
 header-y += fpswa.h
-header-y += gcc_intrin.h
 header-y += ia64regs.h
 header-y += intel_intrin.h
 header-y += intrinsics.h
···
 header-y += rse.h
 header-y += ucontext.h
 
+unifdef-y += gcc_intrin.h
 unifdef-y += perfmon.h
 unifdef-y += ustack.h
include/asm-ia64/hw_irq.h  +11 -1

···
 struct irq_cfg {
         ia64_vector vector;
         cpumask_t domain;
+        cpumask_t old_domain;
+        unsigned move_cleanup_count;
+        u8 move_in_progress : 1;
 };
 extern spinlock_t vector_lock;
 extern struct irq_cfg irq_cfg[NR_IRQS];
···
 extern void free_irq_vector (int vector);
 extern int reserve_irq_vector (int vector);
 extern void __setup_vector_irq(int cpu);
-extern int reassign_irq_vector(int irq, int cpu);
 extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect);
 extern void register_percpu_irq (ia64_vector vec, struct irqaction *action);
 extern int check_irq_used (int irq);
 extern void destroy_and_reserve_irq (unsigned int irq);
+
+#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
+extern int irq_prepare_move(int irq, int cpu);
+extern void irq_complete_move(unsigned int irq);
+#else
+static inline int irq_prepare_move(int irq, int cpu) { return 0; }
+static inline void irq_complete_move(unsigned int irq) {}
+#endif
 
 static inline void ia64_resend_irq(unsigned int vector)
 {
include/asm-ia64/kprobes.h  -4

···
 extern int kprobe_exceptions_notify(struct notifier_block *self,
                                     unsigned long val, void *data);
 
-/* ia64 does not need this */
-static inline void jprobe_return(void)
-{
-}
 extern void invalidate_stacked_regs(void);
 extern void flush_register_stack(void);
 extern void arch_remove_kprobe(struct kprobe *p);
include/asm-ia64/sal.h  +4

···
 ia64_sal_physical_id_info(u16 *splid)
 {
         struct ia64_sal_retval isrv;
+
+        if (sal_revision < SAL_VERSION_CODE(3,2))
+                return -1;
+
         SAL_CALL(isrv, SAL_PHYSICAL_ID_INFO, 0, 0, 0, 0, 0, 0, 0);
         if (splid)
                 *splid = isrv.v0;