Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
powerpc: Fix hcall tracepoint recursion
powerpc/numa: Fix bug in unmap_cpu_from_node
powerpc/numa: Disable VPHN on dedicated processor partitions
powerpc/numa: Add length when creating OF properties via VPHN
powerpc/numa: Check for all VPHN changes
powerpc/numa: Only use active VPHN count fields
powerpc/pseries: Remove unnecessary variable initializations in numa.c
powerpc/pseries: Fix brace placement in numa.c
powerpc/pseries: Fix typo in VPHN comments
powerpc: Fix some 6xx/7xxx CPU setup functions
powerpc: Pass the right cpu_spec to ->setup_cpu() on 64-bit
powerpc/book3e: Protect complex macro args in mmu-book3e.h
powerpc: Fix pfn_valid() when memory starts at a non-zero address

+94 -52
+4 -4
arch/powerpc/include/asm/mmu-book3e.h
··· 40 40 41 41 /* MAS registers bit definitions */ 42 42 43 - #define MAS0_TLBSEL(x) ((x << 28) & 0x30000000) 44 - #define MAS0_ESEL(x) ((x << 16) & 0x0FFF0000) 43 + #define MAS0_TLBSEL(x) (((x) << 28) & 0x30000000) 44 + #define MAS0_ESEL(x) (((x) << 16) & 0x0FFF0000) 45 45 #define MAS0_NV(x) ((x) & 0x00000FFF) 46 46 #define MAS0_HES 0x00004000 47 47 #define MAS0_WQ_ALLWAYS 0x00000000 ··· 50 50 51 51 #define MAS1_VALID 0x80000000 52 52 #define MAS1_IPROT 0x40000000 53 - #define MAS1_TID(x) ((x << 16) & 0x3FFF0000) 53 + #define MAS1_TID(x) (((x) << 16) & 0x3FFF0000) 54 54 #define MAS1_IND 0x00002000 55 55 #define MAS1_TS 0x00001000 56 56 #define MAS1_TSIZE_MASK 0x00000f80 57 57 #define MAS1_TSIZE_SHIFT 7 58 - #define MAS1_TSIZE(x) ((x << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK) 58 + #define MAS1_TSIZE(x) (((x) << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK) 59 59 60 60 #define MAS2_EPN 0xFFFFF000 61 61 #define MAS2_X0 0x00000040
+1 -1
arch/powerpc/include/asm/page.h
··· 101 101 102 102 #ifdef CONFIG_FLATMEM 103 103 #define ARCH_PFN_OFFSET (MEMORY_START >> PAGE_SHIFT) 104 - #define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < (ARCH_PFN_OFFSET + max_mapnr)) 104 + #define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr) 105 105 #endif 106 106 107 107 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+20 -20
arch/powerpc/kernel/cpu_setup_6xx.S
··· 18 18 #include <asm/mmu.h> 19 19 20 20 _GLOBAL(__setup_cpu_603) 21 - mflr r4 21 + mflr r5 22 22 BEGIN_MMU_FTR_SECTION 23 23 li r10,0 24 24 mtspr SPRN_SPRG_603_LRU,r10 /* init SW LRU tracking */ ··· 27 27 bl __init_fpu_registers 28 28 END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE) 29 29 bl setup_common_caches 30 - mtlr r4 30 + mtlr r5 31 31 blr 32 32 _GLOBAL(__setup_cpu_604) 33 - mflr r4 33 + mflr r5 34 34 bl setup_common_caches 35 35 bl setup_604_hid0 36 - mtlr r4 36 + mtlr r5 37 37 blr 38 38 _GLOBAL(__setup_cpu_750) 39 - mflr r4 39 + mflr r5 40 40 bl __init_fpu_registers 41 41 bl setup_common_caches 42 42 bl setup_750_7400_hid0 43 - mtlr r4 43 + mtlr r5 44 44 blr 45 45 _GLOBAL(__setup_cpu_750cx) 46 - mflr r4 46 + mflr r5 47 47 bl __init_fpu_registers 48 48 bl setup_common_caches 49 49 bl setup_750_7400_hid0 50 50 bl setup_750cx 51 - mtlr r4 51 + mtlr r5 52 52 blr 53 53 _GLOBAL(__setup_cpu_750fx) 54 - mflr r4 54 + mflr r5 55 55 bl __init_fpu_registers 56 56 bl setup_common_caches 57 57 bl setup_750_7400_hid0 58 58 bl setup_750fx 59 - mtlr r4 59 + mtlr r5 60 60 blr 61 61 _GLOBAL(__setup_cpu_7400) 62 - mflr r4 62 + mflr r5 63 63 bl __init_fpu_registers 64 64 bl setup_7400_workarounds 65 65 bl setup_common_caches 66 66 bl setup_750_7400_hid0 67 - mtlr r4 67 + mtlr r5 68 68 blr 69 69 _GLOBAL(__setup_cpu_7410) 70 - mflr r4 70 + mflr r5 71 71 bl __init_fpu_registers 72 72 bl setup_7410_workarounds 73 73 bl setup_common_caches 74 74 bl setup_750_7400_hid0 75 75 li r3,0 76 76 mtspr SPRN_L2CR2,r3 77 - mtlr r4 77 + mtlr r5 78 78 blr 79 79 _GLOBAL(__setup_cpu_745x) 80 - mflr r4 80 + mflr r5 81 81 bl setup_common_caches 82 82 bl setup_745x_specifics 83 - mtlr r4 83 + mtlr r5 84 84 blr 85 85 86 86 /* Enable caches for 603's, 604, 750 & 7400 */ ··· 194 194 cror 4*cr0+eq,4*cr0+eq,4*cr1+eq 195 195 cror 4*cr0+eq,4*cr0+eq,4*cr2+eq 196 196 bnelr 197 - lwz r6,CPU_SPEC_FEATURES(r5) 197 + lwz r6,CPU_SPEC_FEATURES(r4) 198 198 li r7,CPU_FTR_CAN_NAP 199 199 andc r6,r6,r7 200 - stw r6,CPU_SPEC_FEATURES(r5) 200 + stw r6,CPU_SPEC_FEATURES(r4) 201 201 blr 202 202 203 203 /* 750fx specific ··· 225 225 andis. r11,r11,L3CR_L3E@h 226 226 beq 1f 227 227 END_FTR_SECTION_IFSET(CPU_FTR_L3CR) 228 - lwz r6,CPU_SPEC_FEATURES(r5) 228 + lwz r6,CPU_SPEC_FEATURES(r4) 229 229 andi. r0,r6,CPU_FTR_L3_DISABLE_NAP 230 230 beq 1f 231 231 li r7,CPU_FTR_CAN_NAP 232 232 andc r6,r6,r7 233 - stw r6,CPU_SPEC_FEATURES(r5) 233 + stw r6,CPU_SPEC_FEATURES(r4) 234 234 1: 235 235 mfspr r11,SPRN_HID0 236 236
+2 -2
arch/powerpc/kernel/cputable.c
··· 2076 2076 * pointer on ppc64 and booke as we are running at 0 in real mode 2077 2077 * on ppc64 and reloc_offset is always 0 on booke. 2078 2078 */ 2079 - if (s->cpu_setup) { 2080 - s->cpu_setup(offset, s); 2079 + if (t->cpu_setup) { 2080 + t->cpu_setup(offset, t); 2081 2081 } 2082 2082 #endif /* CONFIG_PPC64 || CONFIG_BOOKE */ 2083 2083 }
+30 -25
arch/powerpc/mm/numa.c
··· 186 186 dbg("removing cpu %lu from node %d\n", cpu, node); 187 187 188 188 if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) { 189 - cpumask_set_cpu(cpu, node_to_cpumask_map[node]); 189 + cpumask_clear_cpu(cpu, node_to_cpumask_map[node]); 190 190 } else { 191 191 printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n", 192 192 cpu, node); ··· 1289 1289 } 1290 1290 #endif /* CONFIG_MEMORY_HOTPLUG */ 1291 1291 1292 - /* Vrtual Processor Home Node (VPHN) support */ 1292 + /* Virtual Processor Home Node (VPHN) support */ 1293 1293 #ifdef CONFIG_PPC_SPLPAR 1294 - #define VPHN_NR_CHANGE_CTRS (8) 1295 - static u8 vphn_cpu_change_counts[NR_CPUS][VPHN_NR_CHANGE_CTRS]; 1294 + static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS]; 1296 1295 static cpumask_t cpu_associativity_changes_mask; 1297 1296 static int vphn_enabled; 1298 1297 static void set_topology_timer(void); ··· 1302 1303 */ 1303 1304 static void setup_cpu_associativity_change_counters(void) 1304 1305 { 1305 - int cpu = 0; 1306 + int cpu; 1307 + 1308 + /* The VPHN feature supports a maximum of 8 reference points */ 1309 + BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8); 1306 1310 1307 1311 for_each_possible_cpu(cpu) { 1308 - int i = 0; 1312 + int i; 1309 1313 u8 *counts = vphn_cpu_change_counts[cpu]; 1310 1314 volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts; 1311 1315 1312 - for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++) { 1316 + for (i = 0; i < distance_ref_points_depth; i++) 1313 1317 counts[i] = hypervisor_counts[i]; 1314 - } 1315 1318 } 1316 1319 } ··· 1330 1329 */ 1331 1330 static int update_cpu_associativity_changes_mask(void) 1332 1331 { 1333 - int cpu = 0, nr_cpus = 0; 1332 + int cpu, nr_cpus = 0; 1334 1333 cpumask_t *changes = &cpu_associativity_changes_mask; 1335 1334 1336 1335 cpumask_clear(changes); ··· 1340 1339 u8 *counts = vphn_cpu_change_counts[cpu]; 1341 1340 volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts; 1342 1341 1343 - for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++) { 1344 - if (hypervisor_counts[i] > counts[i]) { 1342 + for (i = 0; i < distance_ref_points_depth; i++) { 1343 + if (hypervisor_counts[i] != counts[i]) { 1345 1344 counts[i] = hypervisor_counts[i]; 1346 1345 changed = 1; 1347 1346 } ··· 1355 1354 return nr_cpus; 1356 1355 } 1357 1356 1358 - /* 6 64-bit registers unpacked into 12 32-bit associativity values */ 1359 - #define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32)) 1357 + /* 1358 + * 6 64-bit registers unpacked into 12 32-bit associativity values. To form 1359 + * the complete property we have to add the length in the first cell. 1360 + */ 1361 + #define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1) 1360 1362 1361 1363 /* 1362 1364 * Convert the associativity domain numbers returned from the hypervisor ··· 1367 1363 */ 1368 1364 static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked) 1369 1365 { 1370 - int i = 0; 1371 - int nr_assoc_doms = 0; 1366 + int i, nr_assoc_doms = 0; 1372 1367 const u16 *field = (const u16*) packed; 1373 1368 1374 1369 #define VPHN_FIELD_UNUSED (0xffff) 1375 1370 #define VPHN_FIELD_MSB (0x8000) 1376 1371 #define VPHN_FIELD_MASK (~VPHN_FIELD_MSB) 1377 1372 1378 - for (i = 0; i < VPHN_ASSOC_BUFSIZE; i++) { 1373 + for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) { 1379 1374 if (*field == VPHN_FIELD_UNUSED) { 1380 1375 /* All significant fields processed, and remaining 1381 1376 * fields contain the reserved value of all 1's. ··· 1382 1379 */ 1383 1380 unpacked[i] = *((u32*)field); 1384 1381 field += 2; 1385 - } 1386 - else if (*field & VPHN_FIELD_MSB) { 1382 + } else if (*field & VPHN_FIELD_MSB) { 1387 1383 /* Data is in the lower 15 bits of this field */ 1388 1384 unpacked[i] = *field & VPHN_FIELD_MASK; 1389 1385 field++; 1390 1386 nr_assoc_doms++; 1391 - } 1392 - else { 1387 + } else { 1393 1388 /* Data is in the lower 15 bits of this field 1394 1389 * concatenated with the next 16 bit field 1395 1390 */ ··· 1396 1395 nr_assoc_doms++; 1397 1396 } 1398 1397 } 1398 + 1399 + /* The first cell contains the length of the property */ 1400 + unpacked[0] = nr_assoc_doms; 1399 1401 1400 1402 return nr_assoc_doms; 1401 1403 } ··· 1409 1405 */ 1410 1406 static long hcall_vphn(unsigned long cpu, unsigned int *associativity) 1411 1407 { 1412 - long rc = 0; 1408 + long rc; 1413 1409 long retbuf[PLPAR_HCALL9_BUFSIZE] = {0}; 1414 1410 u64 flags = 1; 1415 1411 int hwcpu = get_hard_smp_processor_id(cpu); ··· 1423 1419 static long vphn_get_associativity(unsigned long cpu, 1424 1420 unsigned int *associativity) 1425 1421 { 1426 - long rc = 0; 1422 + long rc; 1427 1423 1428 1424 rc = hcall_vphn(cpu, associativity); 1429 1425 ··· 1449 1445 */ 1450 1446 int arch_update_cpu_topology(void) 1451 1447 { 1452 - int cpu = 0, nid = 0, old_nid = 0; 1448 + int cpu, nid, old_nid; 1453 1449 unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0}; 1454 - struct sys_device *sysdev = NULL; 1450 + struct sys_device *sysdev; 1455 1451 1456 1452 for_each_cpu_mask(cpu, cpu_associativity_changes_mask) { 1457 1453 vphn_get_associativity(cpu, associativity); ··· 1516 1512 { 1517 1513 int rc = 0; 1518 1514 1519 - if (firmware_has_feature(FW_FEATURE_VPHN)) { 1515 + if (firmware_has_feature(FW_FEATURE_VPHN) && 1516 + get_lppaca()->shared_proc) { 1520 1517 vphn_enabled = 1; 1521 1518 setup_cpu_associativity_change_counters(); 1522 1519 init_timer_deferrable(&topology_timer);
+37
arch/powerpc/platforms/pseries/lpar.c
··· 713 713 /* NB: reg/unreg are called while guarded with the tracepoints_mutex */ 714 714 extern long hcall_tracepoint_refcount; 715 715 716 + /* 717 + * Since the tracing code might execute hcalls we need to guard against 718 + * recursion. One example of this are spinlocks calling H_YIELD on 719 + * shared processor partitions. 720 + */ 721 + static DEFINE_PER_CPU(unsigned int, hcall_trace_depth); 722 + 716 723 void hcall_tracepoint_regfunc(void) 717 724 { 718 725 hcall_tracepoint_refcount++; ··· 732 725 733 726 void __trace_hcall_entry(unsigned long opcode, unsigned long *args) 734 727 { 728 + unsigned long flags; 729 + unsigned int *depth; 730 + 731 + local_irq_save(flags); 732 + 733 + depth = &__get_cpu_var(hcall_trace_depth); 734 + 735 + if (*depth) 736 + goto out; 737 + 738 + (*depth)++; 735 739 trace_hcall_entry(opcode, args); 740 + (*depth)--; 741 + 742 + out: 743 + local_irq_restore(flags); 736 744 } 737 745 738 746 void __trace_hcall_exit(long opcode, unsigned long retval, 739 747 unsigned long *retbuf) 740 748 { 749 + unsigned long flags; 750 + unsigned int *depth; 751 + 752 + local_irq_save(flags); 753 + 754 + depth = &__get_cpu_var(hcall_trace_depth); 755 + 756 + if (*depth) 757 + goto out; 758 + 759 + (*depth)++; 741 760 trace_hcall_exit(opcode, retval, retbuf); 761 + (*depth)--; 762 + 763 + out: 764 + local_irq_restore(flags); 742 765 } 743 766 #endif