Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc:
[POWERPC] Fixes for the SLB shadow buffer code
[POWERPC] Fix a compile warning in powermac/feature.c
[POWERPC] Fix a compile warning in pci_32.c
[POWERPC] Fix parse_drconf_memory() for 64-bit start addresses
[POWERPC] Fix num_cpus calculation in smp_call_function_map()
[POWERPC] ps3: Fix section mismatch in ps3/setup.c
[POWERPC] spufs: Fix affinity after introduction of node_allowed() calls
[POWERPC] Fix special PTE code for secondary hash bucket
[POWERPC] Expand RPN field to 34 bits when using 64k pages

+46 -31
+3
arch/powerpc/kernel/entry_64.S
@@ -389,8 +389,11 @@
 	ld	r9,PACA_SLBSHADOWPTR(r13)
 	li	r12,0
 	std	r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
+	eieio
 	std	r7,SLBSHADOW_STACKVSID(r9)  /* Save VSID */
+	eieio
 	std	r0,SLBSHADOW_STACKESID(r9)  /* Save ESID */
+	eieio

 	slbie	r6
 	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
+4 -1
arch/powerpc/kernel/pci_32.c
@@ -581,8 +581,11 @@
 		if ((r->flags & IORESOURCE_UNSET) && r->end &&
 		    (!ppc_md.pcibios_enable_device_hook ||
 		     !ppc_md.pcibios_enable_device_hook(dev, 1))) {
+			int rc;
+
 			r->flags &= ~IORESOURCE_UNSET;
-			pci_assign_resource(dev, idx);
+			rc = pci_assign_resource(dev, idx);
+			BUG_ON(rc);
 		}
 	}

+3 -6
arch/powerpc/kernel/smp.c
@@ -212,11 +212,6 @@
 	atomic_set(&data.finished, 0);

 	spin_lock(&call_lock);
-	/* Must grab online cpu count with preempt disabled, otherwise
-	 * it can change. */
-	num_cpus = num_online_cpus() - 1;
-	if (!num_cpus)
-		goto done;

 	/* remove 'self' from the map */
 	if (cpu_isset(smp_processor_id(), map))
@@ -219,7 +224,9 @@

 	/* sanity check the map, remove any non-online processors. */
 	cpus_and(map, map, cpu_online_map);
-	if (cpus_empty(map))
+
+	num_cpus = cpus_weight(map);
+	if (!num_cpus)
 		goto done;

 	call_data = &data;
+4 -2
arch/powerpc/mm/hash_low_64.S
@@ -472,10 +472,12 @@
 	/* Now try secondary slot */

 	/* real page number in r5, PTE RPN value + index */
-	rldicl	r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
+	andis.	r0,r31,_PAGE_4K_PFN@h
+	srdi	r5,r31,PTE_RPN_SHIFT
+	bne-	3f
 	sldi	r5,r5,PAGE_SHIFT-HW_PAGE_SHIFT
 	add	r5,r5,r25
-	sldi	r5,r5,HW_PAGE_SHIFT
+3:	sldi	r5,r5,HW_PAGE_SHIFT

 	/* Calculate secondary group hash */
 	andc	r0,r27,r28
+1 -1
arch/powerpc/mm/hash_utils_64.c
@@ -759,7 +759,7 @@
 		    mmu_psize_defs[mmu_vmalloc_psize].sllp) {
 			get_paca()->vmalloc_sllp =
 				mmu_psize_defs[mmu_vmalloc_psize].sllp;
-			slb_flush_and_rebolt();
+			slb_vmalloc_update();
 		}
 #endif /* CONFIG_PPC_64K_PAGES */

+2 -2
arch/powerpc/mm/numa.c
@@ -307,9 +307,9 @@
 	const unsigned int *lm, *dm, *aa;
 	unsigned int ls, ld, la;
 	unsigned int n, aam, aalen;
-	unsigned long lmb_size, size;
+	unsigned long lmb_size, size, start;
 	int nid, default_nid = 0;
-	unsigned int start, ai, flags;
+	unsigned int ai, flags;

 	lm = of_get_property(memory, "ibm,lmb-size", &ls);
 	dm = of_get_property(memory, "ibm,dynamic-memory", &ld);
+18 -10
arch/powerpc/mm/slb.c
@@ -53,8 +53,9 @@
 	return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
 }

-static inline void slb_shadow_update(unsigned long esid, unsigned long vsid,
+static inline void slb_shadow_update(unsigned long ea,
+				     unsigned long flags,
 				     unsigned long entry)
 {
 	/*
@@ -62,11 +61,10 @@
 	 * updating it.
 	 */
 	get_slb_shadow()->save_area[entry].esid = 0;
-	barrier();
-	get_slb_shadow()->save_area[entry].vsid = vsid;
-	barrier();
-	get_slb_shadow()->save_area[entry].esid = esid;
-
+	smp_wmb();
+	get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, flags);
+	smp_wmb();
+	get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, entry);
+	smp_wmb();
 }

 static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
@@ -77,8 +76,7 @@
 	 * we don't get a stale entry here if we get preempted by PHYP
 	 * between these two statements.
 	 */
-	slb_shadow_update(mk_esid_data(ea, entry), mk_vsid_data(ea, flags),
-			  entry);
+	slb_shadow_update(ea, flags, entry);

 	asm volatile("slbmte  %0,%1" :
 		     : "r" (mk_vsid_data(ea, flags)),
@@ -104,8 +104,7 @@
 		ksp_esid_data &= ~SLB_ESID_V;

 	/* Only third entry (stack) may change here so only resave that */
-	slb_shadow_update(ksp_esid_data,
-			  mk_vsid_data(ksp_esid_data, lflags), 2);
+	slb_shadow_update(get_paca()->kstack, lflags, 2);

 	/* We need to do this all in asm, so we're sure we don't touch
 	 * the stack between the slbia and rebolting it. */
@@ -120,6 +121,15 @@
 		       "r"(mk_vsid_data(ksp_esid_data, lflags)),
 		       "r"(ksp_esid_data)
 		     : "memory");
+}
+
+void slb_vmalloc_update(void)
+{
+	unsigned long vflags;
+
+	vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
+	slb_shadow_update(VMALLOC_START, vflags, 1);
+	slb_flush_and_rebolt();
 }

 /* Flush all user entries from the segment table of the current processor. */
+2 -1
arch/powerpc/platforms/cell/spufs/sched.c
@@ -351,7 +351,8 @@
 			lowest_offset = ctx->aff_offset;
 	}

-	gang->aff_ref_spu = aff_ref_location(ctx, mem_aff, gs, lowest_offset);
+	gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs,
+					     lowest_offset);
 }

 static struct spu *ctx_location(struct spu *ref, int offset, int node)
+4 -2
arch/powerpc/platforms/powermac/feature.c
@@ -826,10 +826,12 @@

 	if (value) {
 		if (pci_device_from_OF_node(node, &pbus, &pid) == 0)
-			pdev = pci_find_slot(pbus, pid);
+			pdev = pci_get_bus_and_slot(pbus, pid);
 		if (pdev == NULL)
 			return 0;
 		rc = pci_enable_device(pdev);
+		if (rc == 0)
+			pci_set_master(pdev);
+		pci_dev_put(pdev);
 		if (rc)
 			return rc;
-		pci_set_master(pdev);
 	}
 	return 0;
 }
+1 -1
arch/powerpc/platforms/ps3/setup.c
@@ -109,7 +109,7 @@

 #if defined(CONFIG_FB_PS3) || defined(CONFIG_FB_PS3_MODULE) || \
     defined(CONFIG_PS3_FLASH) || defined(CONFIG_PS3_FLASH_MODULE)
-static void prealloc(struct ps3_prealloc *p)
+static void __init prealloc(struct ps3_prealloc *p)
 {
 	if (!p->size)
 		return;
+1
include/asm-powerpc/mmu-hash64.h
@@ -262,6 +262,7 @@
 extern void slb_flush_and_rebolt(void);
 extern void stab_initialize(unsigned long stab);

+extern void slb_vmalloc_update(void);
 #endif /* __ASSEMBLY__ */

 /*
+3 -5
include/asm-powerpc/pgtable-64k.h
@@ -49,12 +49,10 @@

 /* Shift to put page number into pte.
  *
- * That gives us a max RPN of 32 bits, which means a max of 48 bits
- * of addressable physical space.
- * We could get 3 more bits here by setting PTE_RPN_SHIFT to 29 but
- * 32 makes PTEs more readable for debugging for now :)
+ * That gives us a max RPN of 34 bits, which means a max of 50 bits
+ * of addressable physical space, or 46 bits for the special 4k PFNs.
  */
-#define PTE_RPN_SHIFT	(32)
+#define PTE_RPN_SHIFT	(30)
 #define PTE_RPN_MAX	(1UL << (64 - PTE_RPN_SHIFT))
 #define PTE_RPN_MASK	(~((1UL<<PTE_RPN_SHIFT)-1))
