Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6

+111 -13
+2
arch/sparc64/kernel/setup.c
··· 542 542 } 543 543 #endif 544 544 545 + smp_setup_cpu_possible_map(); 546 + 545 547 paging_init(); 546 548 } 547 549
+19 -9
arch/sparc64/kernel/smp.c
··· 1079 1079 return 0; 1080 1080 } 1081 1081 1082 + /* Constrain the number of cpus to max_cpus. */ 1082 1083 void __init smp_prepare_cpus(unsigned int max_cpus) 1083 1084 { 1084 - int instance, mid; 1085 - 1086 - instance = 0; 1087 - while (!cpu_find_by_instance(instance, NULL, &mid)) { 1088 - if (mid < max_cpus) 1089 - cpu_set(mid, phys_cpu_present_map); 1090 - instance++; 1091 - } 1092 - 1093 1085 if (num_possible_cpus() > max_cpus) { 1086 + int instance, mid; 1087 + 1094 1088 instance = 0; 1095 1089 while (!cpu_find_by_instance(instance, NULL, &mid)) { 1096 1090 if (mid != boot_cpu_id) { ··· 1097 1103 } 1098 1104 1099 1105 smp_store_cpu_info(boot_cpu_id); 1106 + } 1107 + 1108 + /* Set this up early so that things like the scheduler can init 1109 + * properly. We use the same cpu mask for both the present and 1110 + * possible cpu map. 1111 + */ 1112 + void __init smp_setup_cpu_possible_map(void) 1113 + { 1114 + int instance, mid; 1115 + 1116 + instance = 0; 1117 + while (!cpu_find_by_instance(instance, NULL, &mid)) { 1118 + if (mid < NR_CPUS) 1119 + cpu_set(mid, phys_cpu_present_map); 1120 + instance++; 1121 + } 1100 1122 } 1101 1123 1102 1124 void __devinit smp_prepare_boot_cpu(void)
+84 -4
include/asm-sparc64/futex.h
··· 1 - #ifndef _ASM_FUTEX_H 2 - #define _ASM_FUTEX_H 1 + #ifndef _SPARC64_FUTEX_H 2 + #define _SPARC64_FUTEX_H 3 3 4 - #include <asm-generic/futex.h> 4 + #include <linux/futex.h> 5 + #include <asm/errno.h> 6 + #include <asm/system.h> 7 + #include <asm/uaccess.h> 5 8 6 - #endif 9 + #define __futex_cas_op(insn, ret, oldval, uaddr, oparg) \ 10 + __asm__ __volatile__( \ 11 + "\n1: lduwa [%3] %%asi, %2\n" \ 12 + " " insn "\n" \ 13 + "2: casa [%3] %%asi, %2, %1\n" \ 14 + " cmp %2, %1\n" \ 15 + " bne,pn %%icc, 1b\n" \ 16 + " mov 0, %0\n" \ 17 + "3:\n" \ 18 + " .section .fixup,#alloc,#execinstr\n" \ 19 + " .align 4\n" \ 20 + "4: ba 3b\n" \ 21 + " mov %5, %0\n" \ 22 + " .previous\n" \ 23 + " .section __ex_table,#alloc\n" \ 24 + " .align 4\n" \ 25 + " .word 1b, 4b\n" \ 26 + " .word 2b, 4b\n" \ 27 + " .previous\n" \ 28 + : "=&r" (ret), "=&r" (oldval), "=&r" (tem) \ 29 + : "r" (uaddr), "r" (oparg), "i" (-EFAULT) \ 30 + : "memory") 31 + 32 + static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) 33 + { 34 + int op = (encoded_op >> 28) & 7; 35 + int cmp = (encoded_op >> 24) & 15; 36 + int oparg = (encoded_op << 8) >> 20; 37 + int cmparg = (encoded_op << 20) >> 20; 38 + int oldval = 0, ret, tem; 39 + 40 + if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))) 41 + return -EFAULT; 42 + if (unlikely((((unsigned long) uaddr) & 0x3UL))) 43 + return -EINVAL; 44 + 45 + if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) 46 + oparg = 1 << oparg; 47 + 48 + inc_preempt_count(); 49 + 50 + switch (op) { 51 + case FUTEX_OP_SET: 52 + __futex_cas_op("mov\t%4, %1", ret, oldval, uaddr, oparg); 53 + break; 54 + case FUTEX_OP_ADD: 55 + __futex_cas_op("add\t%2, %4, %1", ret, oldval, uaddr, oparg); 56 + break; 57 + case FUTEX_OP_OR: 58 + __futex_cas_op("or\t%2, %4, %1", ret, oldval, uaddr, oparg); 59 + break; 60 + case FUTEX_OP_ANDN: 61 + __futex_cas_op("and\t%2, %4, %1", ret, oldval, uaddr, oparg); 62 + break; 63 + case FUTEX_OP_XOR: 64 + __futex_cas_op("xor\t%2, %4, %1", ret, oldval, uaddr, oparg); 65 + break; 66 + default: 67 + ret = -ENOSYS; 68 + } 69 + 70 + dec_preempt_count(); 71 + 72 + if (!ret) { 73 + switch (cmp) { 74 + case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break; 75 + case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break; 76 + case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break; 77 + case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break; 78 + case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break; 79 + case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break; 80 + default: ret = -ENOSYS; 81 + } 82 + } 83 + return ret; 84 + } 85 + 86 + #endif /* !(_SPARC64_FUTEX_H) */
+6
include/asm-sparc64/smp.h
··· 66 66 67 67 #define raw_smp_processor_id() (current_thread_info()->cpu) 68 68 69 + extern void smp_setup_cpu_possible_map(void); 70 + 69 71 #endif /* !(__ASSEMBLY__) */ 72 + 73 + #else 74 + 75 + #define smp_setup_cpu_possible_map() do { } while (0) 70 76 71 77 #endif /* !(CONFIG_SMP) */ 72 78