Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

ttm/pool: make pool shrinker NUMA aware (v2)

This enables NUMA awareness for the shrinker on the
ttm pools.

Cc: Christian Koenig <christian.koenig@amd.com>
Cc: Dave Chinner <david@fromorbit.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>

+21 -17
+21 -17
drivers/gpu/drm/ttm/ttm_pool.c
··· 423 423 return NULL; 424 424 } 425 425 426 - /* Free pages using the global shrinker list */ 427 - static unsigned int ttm_pool_shrink(void) 426 + /* Free pages using the per-node shrinker list */ 427 + static unsigned int ttm_pool_shrink(int nid, unsigned long num_to_free) 428 428 { 429 + LIST_HEAD(dispose); 429 430 struct ttm_pool_type *pt; 430 431 unsigned int num_pages; 431 - struct page *p; 432 432 433 433 down_read(&pool_shrink_rwsem); 434 434 spin_lock(&shrinker_lock); ··· 436 436 list_move_tail(&pt->shrinker_list, &shrinker_list); 437 437 spin_unlock(&shrinker_lock); 438 438 439 - p = ttm_pool_type_take(pt, ttm_pool_nid(pt->pool)); 440 - if (p) { 441 - ttm_pool_free_page(pt->pool, pt->caching, pt->order, p, true); 442 - num_pages = 1 << pt->order; 443 - } else { 444 - num_pages = 0; 445 - } 439 + num_pages = list_lru_walk_node(&pt->pages, nid, pool_move_to_dispose_list, &dispose, &num_to_free); 440 + num_pages *= 1 << pt->order; 441 + 442 + ttm_pool_dispose_list(pt, &dispose); 446 443 up_read(&pool_shrink_rwsem); 447 444 448 445 return num_pages; ··· 791 794 pt = ttm_pool_select_type(pool, page_caching, order); 792 795 if (pt && allow_pools) 793 796 p = ttm_pool_type_take(pt, ttm_pool_nid(pool)); 797 + 794 798 /* 795 799 * If that fails or previously failed, allocate from system. 
796 800 * Note that this also disallows additional pool allocations using ··· 942 944 { 943 945 ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages); 944 946 945 - while (atomic_long_read(&allocated_pages) > page_pool_size) 946 - ttm_pool_shrink(); 947 + while (atomic_long_read(&allocated_pages) > page_pool_size) { 948 + unsigned long diff = atomic_long_read(&allocated_pages) - page_pool_size; 949 + ttm_pool_shrink(ttm_pool_nid(pool), diff); 950 + } 947 951 } 948 952 EXPORT_SYMBOL(ttm_pool_free); 949 953 ··· 1200 1200 unsigned long num_freed = 0; 1201 1201 1202 1202 do 1203 - num_freed += ttm_pool_shrink(); 1203 + num_freed += ttm_pool_shrink(sc->nid, sc->nr_to_scan); 1204 1204 while (num_freed < sc->nr_to_scan && 1205 1205 atomic_long_read(&allocated_pages)); 1206 1206 ··· 1328 1328 .nr_to_scan = TTM_SHRINKER_BATCH, 1329 1329 }; 1330 1330 unsigned long count; 1331 + int nid; 1331 1332 1332 1333 fs_reclaim_acquire(GFP_KERNEL); 1333 - count = ttm_pool_shrinker_count(mm_shrinker, &sc); 1334 - seq_printf(m, "%lu/%lu\n", count, 1335 - ttm_pool_shrinker_scan(mm_shrinker, &sc)); 1334 + for_each_node(nid) { 1335 + sc.nid = nid; 1336 + count = ttm_pool_shrinker_count(mm_shrinker, &sc); 1337 + seq_printf(m, "%d: %lu/%lu\n", nid, count, 1338 + ttm_pool_shrinker_scan(mm_shrinker, &sc)); 1339 + } 1336 1340 fs_reclaim_release(GFP_KERNEL); 1337 1341 1338 1342 return 0; ··· 1384 1380 #endif 1385 1381 #endif 1386 1382 1387 - mm_shrinker = shrinker_alloc(0, "drm-ttm_pool"); 1383 + mm_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE, "drm-ttm_pool"); 1388 1384 if (!mm_shrinker) 1389 1385 return -ENOMEM; 1390 1386