Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

drm/ttm: use gpu mm stats to track gpu memory allocations. (v4)

This uses the newly introduced per-node gpu tracking stats
to track GPU memory allocated via TTM and reclaimable memory in
the TTM page pools.

These stats will be useful later for system information and
when mem cgroups are integrated.

Cc: Christian Koenig <christian.koenig@amd.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: linux-mm@kvack.org
Cc: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>

+15 -6
+15 -6
drivers/gpu/drm/ttm/ttm_pool.c
··· 159 159 160 160 if (!ttm_pool_uses_dma_alloc(pool)) { 161 161 p = alloc_pages_node(pool->nid, gfp_flags, order); 162 - if (p) 162 + if (p) { 163 163 p->private = order; 164 + mod_lruvec_page_state(p, NR_GPU_ACTIVE, 1 << order); 165 + } 164 166 return p; 165 167 } 166 168 ··· 197 195 198 196 /* Reset the caching and pages of size 1 << order */ 199 197 static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching, 200 - unsigned int order, struct page *p) 198 + unsigned int order, struct page *p, bool reclaim) 201 199 { 202 200 unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS; 203 201 struct ttm_pool_dma *dma; ··· 212 210 #endif 213 211 214 212 if (!pool || !ttm_pool_uses_dma_alloc(pool)) { 213 + mod_lruvec_page_state(p, reclaim ? NR_GPU_RECLAIM : NR_GPU_ACTIVE, 214 + -(1 << order)); 215 215 __free_pages(p, order); 216 216 return; 217 217 } ··· 301 297 list_add(&p->lru, &pt->pages); 302 298 spin_unlock(&pt->lock); 303 299 atomic_long_add(1 << pt->order, &allocated_pages); 300 + 301 + mod_lruvec_page_state(p, NR_GPU_ACTIVE, -num_pages); 302 + mod_lruvec_page_state(p, NR_GPU_RECLAIM, num_pages); 304 303 } 305 304 306 305 /* Take pages from a specific pool_type, return NULL when nothing available */ ··· 315 308 p = list_first_entry_or_null(&pt->pages, typeof(*p), lru); 316 309 if (p) { 317 310 atomic_long_sub(1 << pt->order, &allocated_pages); 311 + mod_lruvec_page_state(p, NR_GPU_ACTIVE, (1 << pt->order)); 312 + mod_lruvec_page_state(p, NR_GPU_RECLAIM, -(1 << pt->order)); 318 313 list_del(&p->lru); 319 314 } 320 315 spin_unlock(&pt->lock); ··· 349 340 spin_unlock(&shrinker_lock); 350 341 351 342 while ((p = ttm_pool_type_take(pt))) 352 - ttm_pool_free_page(pt->pool, pt->caching, pt->order, p); 343 + ttm_pool_free_page(pt->pool, pt->caching, pt->order, p, true); 353 344 } 354 345 355 346 /* Return the pool_type to use for the given caching and order */ ··· 401 392 402 393 p = ttm_pool_type_take(pt); 403 394 if (p) { 404 - ttm_pool_free_page(pt->pool, pt->caching, pt->order, p); 395 + ttm_pool_free_page(pt->pool, pt->caching, pt->order, p, true); 405 396 num_pages = 1 << pt->order; 406 397 } else { 407 398 num_pages = 0; ··· 493 484 if (pt) 494 485 ttm_pool_type_give(pt, page); 495 486 else 496 - ttm_pool_free_page(pool, caching, order, page); 487 + ttm_pool_free_page(pool, caching, order, page, false); 497 488 498 489 return nr; 499 490 } ··· 801 792 return 0; 802 793 803 794 error_free_page: 804 - ttm_pool_free_page(pool, page_caching, order, p); 795 + ttm_pool_free_page(pool, page_caching, order, p, false); 805 796 806 797 error_free_all: 807 798 if (tt->restore)