Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'slub/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6

* 'slub/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
SLUB: Allow full duplication of kmalloc array for 390
slub: move kmem_cache_node into its own cacheline

+16 -30
+4 -7
include/linux/slub_def.h
··· 75 75 int offset; /* Free pointer offset. */ 76 76 struct kmem_cache_order_objects oo; 77 77 78 - /* 79 - * Avoid an extra cache line for UP, SMP and for the node local to 80 - * struct kmem_cache. 81 - */ 82 - struct kmem_cache_node local_node; 83 - 84 78 /* Allocation and freeing of slabs */ 85 79 struct kmem_cache_order_objects max; 86 80 struct kmem_cache_order_objects min; ··· 96 102 */ 97 103 int remote_node_defrag_ratio; 98 104 struct kmem_cache_node *node[MAX_NUMNODES]; 105 + #else 106 + /* Avoid an extra cache line for UP */ 107 + struct kmem_cache_node local_node; 99 108 #endif 100 109 }; 101 110 ··· 137 140 #ifdef CONFIG_ZONE_DMA 138 141 #define SLUB_DMA __GFP_DMA 139 142 /* Reserve extra caches for potential DMA use */ 140 - #define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT - 6) 143 + #define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT) 141 144 #else 142 145 /* Disable DMA functionality */ 143 146 #define SLUB_DMA (__force gfp_t)0
+12 -23
mm/slub.c
··· 2137 2137 2138 2138 for_each_node_state(node, N_NORMAL_MEMORY) { 2139 2139 struct kmem_cache_node *n = s->node[node]; 2140 - if (n && n != &s->local_node) 2140 + if (n) 2141 2141 kmem_cache_free(kmalloc_caches, n); 2142 2142 s->node[node] = NULL; 2143 2143 } ··· 2146 2146 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags) 2147 2147 { 2148 2148 int node; 2149 - int local_node; 2150 - 2151 - if (slab_state >= UP && (s < kmalloc_caches || 2152 - s >= kmalloc_caches + KMALLOC_CACHES)) 2153 - local_node = page_to_nid(virt_to_page(s)); 2154 - else 2155 - local_node = 0; 2156 2149 2157 2150 for_each_node_state(node, N_NORMAL_MEMORY) { 2158 2151 struct kmem_cache_node *n; 2159 2152 2160 - if (local_node == node) 2161 - n = &s->local_node; 2162 - else { 2163 - if (slab_state == DOWN) { 2164 - early_kmem_cache_node_alloc(gfpflags, node); 2165 - continue; 2166 - } 2167 - n = kmem_cache_alloc_node(kmalloc_caches, 2168 - gfpflags, node); 2169 - 2170 - if (!n) { 2171 - free_kmem_cache_nodes(s); 2172 - return 0; 2173 - } 2174 - 2153 + if (slab_state == DOWN) { 2154 + early_kmem_cache_node_alloc(gfpflags, node); 2155 + continue; 2175 2156 } 2157 + n = kmem_cache_alloc_node(kmalloc_caches, 2158 + gfpflags, node); 2159 + 2160 + if (!n) { 2161 + free_kmem_cache_nodes(s); 2162 + return 0; 2163 + } 2164 + 2176 2165 s->node[node] = n; 2177 2166 init_kmem_cache_node(n, s); 2178 2167 }