Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

ttm/pool: port to list_lru. (v2)

This is an initial port of the TTM pools for
write-combined and uncached pages to the list_lru
infrastructure.

This makes the pools more NUMA-aware and avoids
needing separate per-node NUMA pools (a later
commit enables this).

Cc: Christian Koenig <christian.koenig@amd.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Dave Chinner <david@fromorbit.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
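For context, list_lru keeps one list (and one lock) per NUMA node behind a single struct, which is what makes the port NUMA-aware without duplicating whole pools. Below is a minimal sketch of the calls the patch leans on; the names pool_pages, pool_pages_init, pool_pages_put and pool_pages_count are illustration only, not part of the patch:

	#include <linux/list_lru.h>
	#include <linux/mm.h>

	/* Illustration only: one list_lru replaces the old spinlock + list_head. */
	static struct list_lru pool_pages;

	static int pool_pages_init(void)
	{
		/* Allocates the per-NUMA-node lists and their locks. */
		return list_lru_init(&pool_pages);
	}

	static void pool_pages_put(struct page *p)
	{
		/* File the page on the list of its own node, as the patch does. */
		INIT_LIST_HEAD(&p->lru);
		rcu_read_lock();
		list_lru_add(&pool_pages, &p->lru, page_to_nid(p), NULL);
		rcu_read_unlock();
	}

	static unsigned long pool_pages_count(void)
	{
		/* Entries summed across all nodes. */
		return list_lru_count(&pool_pages);
	}

list_lru_add() files the entry on the list for page_to_nid(p), and list_lru_count() sums the per-node counts, which is why the tests below can treat it as an emptiness check.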

5 files changed, +84 -50
drivers/gpu/drm/ttm/tests/ttm_device_test.c (+1 -1)
···
 
 			if (ttm_pool_uses_dma_alloc(pool))
 				KUNIT_ASSERT_FALSE(test,
-						   list_empty(&pt.pages));
+						   !list_lru_count(&pt.pages));
 		}
 	}
 }
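The test conversions are mechanical: a list_lru has no list head to inspect directly, so list_empty(&pt.pages) becomes !list_lru_count(&pt.pages) (the count summed over all NUMA nodes) and, further down, list_is_singular() becomes list_lru_count() == 1.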
drivers/gpu/drm/ttm/tests/ttm_pool_test.c (+16 -16)
···
 	pool = ttm_pool_pre_populated(test, size, caching);
 
 	pt = &pool->caching[caching].orders[order];
-	KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
+	KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt->pages));
 
 	tt = ttm_tt_kunit_init(test, 0, caching, size);
 	KUNIT_ASSERT_NOT_NULL(test, tt);
···
 	err = ttm_pool_alloc(pool, tt, &simple_ctx);
 	KUNIT_ASSERT_EQ(test, err, 0);
 
-	KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
+	KUNIT_ASSERT_TRUE(test, !list_lru_count(&pt->pages));
 
 	ttm_pool_free(pool, tt);
 	ttm_tt_fini(tt);
···
 	tt = ttm_tt_kunit_init(test, 0, tt_caching, size);
 	KUNIT_ASSERT_NOT_NULL(test, tt);
 
-	KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
-	KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));
+	KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt_pool->pages));
+	KUNIT_ASSERT_TRUE(test, !list_lru_count(&pt_tt->pages));
 
 	err = ttm_pool_alloc(pool, tt, &simple_ctx);
 	KUNIT_ASSERT_EQ(test, err, 0);
···
 	ttm_pool_free(pool, tt);
 	ttm_tt_fini(tt);
 
-	KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
-	KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));
+	KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt_pool->pages));
+	KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt_tt->pages));
 
 	ttm_pool_fini(pool);
 }
···
 	tt = ttm_tt_kunit_init(test, 0, caching, snd_size);
 	KUNIT_ASSERT_NOT_NULL(test, tt);
 
-	KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
-	KUNIT_ASSERT_TRUE(test, list_empty(&pt_tt->pages));
+	KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt_pool->pages));
+	KUNIT_ASSERT_TRUE(test, !list_lru_count(&pt_tt->pages));
 
 	err = ttm_pool_alloc(pool, tt, &simple_ctx);
 	KUNIT_ASSERT_EQ(test, err, 0);
···
 	ttm_pool_free(pool, tt);
 	ttm_tt_fini(tt);
 
-	KUNIT_ASSERT_FALSE(test, list_empty(&pt_pool->pages));
-	KUNIT_ASSERT_FALSE(test, list_empty(&pt_tt->pages));
+	KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt_pool->pages));
+	KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt_tt->pages));
 
 	ttm_pool_fini(pool);
 }
···
 	ttm_pool_alloc(pool, tt, &simple_ctx);
 
 	pt = &pool->caching[caching].orders[order];
-	KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
+	KUNIT_ASSERT_TRUE(test, !list_lru_count(&pt->pages));
 
 	ttm_pool_free(pool, tt);
 	ttm_tt_fini(tt);
 
-	KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
+	KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt->pages));
 
 	ttm_pool_fini(pool);
 }
···
 	ttm_pool_alloc(pool, tt, &simple_ctx);
 
 	pt = &pool->caching[caching].orders[order];
-	KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));
+	KUNIT_ASSERT_TRUE(test, list_lru_count(&pt->pages) == 1);
 
 	ttm_pool_free(pool, tt);
 	ttm_tt_fini(tt);
 
-	KUNIT_ASSERT_TRUE(test, list_is_singular(&pt->pages));
+	KUNIT_ASSERT_TRUE(test, list_lru_count(&pt->pages) == 1);
 
 	ttm_pool_fini(pool);
 }
···
 	pool = ttm_pool_pre_populated(test, size, caching);
 	pt = &pool->caching[caching].orders[order];
 
-	KUNIT_ASSERT_FALSE(test, list_empty(&pt->pages));
+	KUNIT_ASSERT_FALSE(test, !list_lru_count(&pt->pages));
 
 	ttm_pool_fini(pool);
 
-	KUNIT_ASSERT_TRUE(test, list_empty(&pt->pages));
+	KUNIT_ASSERT_TRUE(test, !list_lru_count(&pt->pages));
 }
 
 static struct kunit_case ttm_pool_test_cases[] = {
drivers/gpu/drm/ttm/ttm_pool.c (+63 -29)
···
 static struct shrinker *mm_shrinker;
 static DECLARE_RWSEM(pool_shrink_rwsem);
 
+static int ttm_pool_nid(struct ttm_pool *pool)
+{
+	int nid = NUMA_NO_NODE;
+	if (pool)
+		nid = pool->nid;
+	if (nid == NUMA_NO_NODE)
+		nid = numa_node_id();
+	return nid;
+}
+
 /* Allocate pages of size 1 << order with the given gfp_flags */
 static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
 					unsigned int order)
···
 			clear_page(page_address(p + i));
 	}
 
-	spin_lock(&pt->lock);
-	list_add(&p->lru, &pt->pages);
-	spin_unlock(&pt->lock);
+	INIT_LIST_HEAD(&p->lru);
+	rcu_read_lock();
+	list_lru_add(&pt->pages, &p->lru, page_to_nid(p), NULL);
+	rcu_read_unlock();
 	atomic_long_add(1 << pt->order, &allocated_pages);
 
 	mod_lruvec_page_state(p, NR_GPU_ACTIVE, -num_pages);
 	mod_lruvec_page_state(p, NR_GPU_RECLAIM, num_pages);
 }
 
-/* Take pages from a specific pool_type, return NULL when nothing available */
-static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
+static enum lru_status take_one_from_lru(struct list_head *item,
+					 struct list_lru_one *list,
+					 void *cb_arg)
 {
-	struct page *p;
+	struct page **out_page = cb_arg;
+	struct page *p = container_of(item, struct page, lru);
+	list_lru_isolate(list, item);
 
-	spin_lock(&pt->lock);
-	p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
-	if (p) {
+	*out_page = p;
+	return LRU_REMOVED;
+}
+
+/* Take pages from a specific pool_type, return NULL when nothing available */
+static struct page *ttm_pool_type_take(struct ttm_pool_type *pt, int nid)
+{
+	int ret;
+	struct page *p = NULL;
+	unsigned long nr_to_walk = 1;
+
+	ret = list_lru_walk_node(&pt->pages, nid, take_one_from_lru,
+				 (void *)&p, &nr_to_walk);
+	if (ret == 1 && p) {
 		atomic_long_sub(1 << pt->order, &allocated_pages);
 		mod_lruvec_page_state(p, NR_GPU_ACTIVE, (1 << pt->order));
 		mod_lruvec_page_state(p, NR_GPU_RECLAIM, -(1 << pt->order));
-		list_del(&p->lru);
 	}
-	spin_unlock(&pt->lock);
-
 	return p;
 }
···
 	pt->pool = pool;
 	pt->caching = caching;
 	pt->order = order;
-	spin_lock_init(&pt->lock);
-	INIT_LIST_HEAD(&pt->pages);
+	list_lru_init(&pt->pages);
 
 	spin_lock(&shrinker_lock);
 	list_add_tail(&pt->shrinker_list, &shrinker_list);
 	spin_unlock(&shrinker_lock);
 }
 
+static enum lru_status pool_move_to_dispose_list(struct list_head *item,
+						 struct list_lru_one *list,
+						 void *cb_arg)
+{
+	struct list_head *dispose = cb_arg;
+
+	list_lru_isolate_move(list, item, dispose);
+
+	return LRU_REMOVED;
+}
+
+static void ttm_pool_dispose_list(struct ttm_pool_type *pt,
+				  struct list_head *dispose)
+{
+	while (!list_empty(dispose)) {
+		struct page *p;
+		p = list_first_entry(dispose, struct page, lru);
+		list_del_init(&p->lru);
+		atomic_long_sub(1 << pt->order, &allocated_pages);
+		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p, true);
+	}
+}
+
 /* Remove a pool_type from the global shrinker list and free all pages */
 static void ttm_pool_type_fini(struct ttm_pool_type *pt)
 {
-	struct page *p;
+	LIST_HEAD(dispose);
 
 	spin_lock(&shrinker_lock);
 	list_del(&pt->shrinker_list);
 	spin_unlock(&shrinker_lock);
 
-	while ((p = ttm_pool_type_take(pt)))
-		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p, true);
+	list_lru_walk(&pt->pages, pool_move_to_dispose_list, &dispose, LONG_MAX);
+	ttm_pool_dispose_list(pt, &dispose);
 }
 
 /* Return the pool_type to use for the given caching and order */
···
 	list_move_tail(&pt->shrinker_list, &shrinker_list);
 	spin_unlock(&shrinker_lock);
 
-	p = ttm_pool_type_take(pt);
+	p = ttm_pool_type_take(pt, ttm_pool_nid(pt->pool));
 	if (p) {
 		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p, true);
 		num_pages = 1 << pt->order;
···
 		p = NULL;
 		pt = ttm_pool_select_type(pool, page_caching, order);
 		if (pt && allow_pools)
-			p = ttm_pool_type_take(pt);
+			p = ttm_pool_type_take(pt, ttm_pool_nid(pool));
 		/*
 		 * If that fails or previously failed, allocate from system.
 		 * Note that this also disallows additional pool allocations using
···
 /* Count the number of pages available in a pool_type */
 static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
 {
-	unsigned int count = 0;
-	struct page *p;
-
-	spin_lock(&pt->lock);
-	/* Only used for debugfs, the overhead doesn't matter */
-	list_for_each_entry(p, &pt->pages, lru)
-		++count;
-	spin_unlock(&pt->lock);
-
-	return count;
+	return list_lru_count(&pt->pages);
 }
 
 /* Print a nice header for the order */
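ttm_pool_type_fini() above switches from popping pages one at a time to an isolate-then-dispose pattern. Here is a condensed sketch of that idiom, with hypothetical names (move_to_dispose and drain_lru are illustration only) and __free_page() standing in for ttm_pool_free_page(): the callback runs under list_lru's internal per-node lock and only unhooks entries; the actual freeing, which may change page attributes and sleep, happens afterwards with no lru lock held.

	#include <linux/limits.h>
	#include <linux/list_lru.h>
	#include <linux/mm.h>

	static enum lru_status move_to_dispose(struct list_head *item,
					       struct list_lru_one *list,
					       void *cb_arg)
	{
		/* Runs under list_lru's per-node lock: only unhook the entry. */
		list_lru_isolate_move(list, item, (struct list_head *)cb_arg);
		return LRU_REMOVED;
	}

	static void drain_lru(struct list_lru *lru)
	{
		LIST_HEAD(dispose);
		struct page *p, *tmp;

		/* Move every entry onto a private list... */
		list_lru_walk(lru, move_to_dispose, &dispose, LONG_MAX);

		/* ...then free with no lru lock held; __free_page() stands in
		 * for ttm_pool_free_page(). */
		list_for_each_entry_safe(p, tmp, &dispose, lru) {
			list_del_init(&p->lru);
			__free_page(p);
		}
	}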
include/drm/ttm/ttm_pool.h (+3 -4)
···
 #include <linux/mmzone.h>
 #include <linux/llist.h>
 #include <linux/spinlock.h>
+#include <linux/list_lru.h>
 #include <drm/ttm/ttm_caching.h>
 
 struct device;
···
  * @order: the allocation order our pages have
  * @caching: the caching type our pages have
  * @shrinker_list: our place on the global shrinker list
- * @lock: protection of the page list
- * @pages: the list of pages in the pool
+ * @pages: the list_lru of pages in the pool
  */
 struct ttm_pool_type {
 	struct ttm_pool *pool;
···
 
 	struct list_head shrinker_list;
 
-	spinlock_t lock;
-	struct list_head pages;
+	struct list_lru pages;
 };
 
 /**
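This header change is the crux of the port: struct list_lru carries its own per-NUMA-node list and lock internally, so the explicit spinlock_t can be dropped together with the plain list_head.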
mm/list_lru.c (+1 -0)
···
 	unlock_list_lru(l, false);
 	return false;
 }
+EXPORT_SYMBOL_GPL(list_lru_add);
 
 bool list_lru_add_obj(struct list_lru *lru, struct list_head *item)
 {
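The one-line mm/ change is needed because list_lru_add() was not previously exported to modules (the list_lru_add_obj() wrapper already was), and TTM can be built as a module.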