Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

drm/amdgpu: update the functions to use amdgpu version of hmm

At times we need a bo reference for hmm; for that, add
a new struct amdgpu_hmm_range which holds an optional
bo member along with the hmm_range.

Use amdgpu_hmm_range instead of hmm_range, and make the bo
an optional argument so callers can choose whether the
bo reference should be taken for them or handle it
explicitly themselves.

Signed-off-by: Sunil Khatri <sunil.khatri@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Sunil Khatri and committed by
Alex Deucher
737da536 071bba96

+56 -44
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
··· 71 71 struct mutex lock; 72 72 struct amdgpu_bo *bo; 73 73 struct dma_buf *dmabuf; 74 - struct hmm_range *range; 74 + struct amdgpu_hmm_range *range; 75 75 struct list_head attachments; 76 76 /* protected by amdkfd_process_info.lock */ 77 77 struct list_head validate_list;
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
··· 1057 1057 struct amdkfd_process_info *process_info = mem->process_info; 1058 1058 struct amdgpu_bo *bo = mem->bo; 1059 1059 struct ttm_operation_ctx ctx = { true, false }; 1060 - struct hmm_range *range; 1060 + struct amdgpu_hmm_range *range; 1061 1061 int ret = 0; 1062 1062 1063 1063 mutex_lock(&process_info->lock); ··· 1089 1089 return 0; 1090 1090 } 1091 1091 1092 - range = amdgpu_hmm_range_alloc(); 1092 + range = amdgpu_hmm_range_alloc(NULL); 1093 1093 if (unlikely(!range)) { 1094 1094 ret = -ENOMEM; 1095 1095 goto unregister_out; ··· 2573 2573 } 2574 2574 } 2575 2575 2576 - mem->range = amdgpu_hmm_range_alloc(); 2576 + mem->range = amdgpu_hmm_range_alloc(NULL); 2577 2577 if (unlikely(!mem->range)) 2578 2578 return -ENOMEM; 2579 2579 /* Get updated user pages */
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
··· 38 38 struct amdgpu_bo *bo; 39 39 struct amdgpu_bo_va *bo_va; 40 40 uint32_t priority; 41 - struct hmm_range *range; 41 + struct amdgpu_hmm_range *range; 42 42 bool user_invalidated; 43 43 }; 44 44
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 29 29 #include <linux/pagemap.h> 30 30 #include <linux/sync_file.h> 31 31 #include <linux/dma-buf.h> 32 - #include <linux/hmm.h> 33 32 34 33 #include <drm/amdgpu_drm.h> 35 34 #include <drm/drm_syncobj.h> ··· 891 892 bool userpage_invalidated = false; 892 893 struct amdgpu_bo *bo = e->bo; 893 894 894 - e->range = amdgpu_hmm_range_alloc(); 895 + e->range = amdgpu_hmm_range_alloc(NULL); 895 896 if (unlikely(!e->range)) 896 897 return -ENOMEM; 897 898 ··· 900 901 goto out_free_user_pages; 901 902 902 903 for (i = 0; i < bo->tbo.ttm->num_pages; i++) { 903 - if (bo->tbo.ttm->pages[i] != hmm_pfn_to_page(e->range->hmm_pfns[i])) { 904 + if (bo->tbo.ttm->pages[i] != 905 + hmm_pfn_to_page(e->range->hmm_range.hmm_pfns[i])) { 904 906 userpage_invalidated = true; 905 907 break; 906 908 }
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
··· 531 531 struct drm_amdgpu_gem_userptr *args = data; 532 532 struct amdgpu_fpriv *fpriv = filp->driver_priv; 533 533 struct drm_gem_object *gobj; 534 - struct hmm_range *range; 534 + struct amdgpu_hmm_range *range; 535 535 struct amdgpu_bo *bo; 536 536 uint32_t handle; 537 537 int r; ··· 572 572 goto release_object; 573 573 574 574 if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) { 575 - range = amdgpu_hmm_range_alloc(); 575 + range = amdgpu_hmm_range_alloc(NULL); 576 576 if (unlikely(!range)) 577 577 return -ENOMEM; 578 578 r = amdgpu_ttm_tt_get_user_pages(bo, range);
+20 -11
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
··· 168 168 int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, 169 169 uint64_t start, uint64_t npages, bool readonly, 170 170 void *owner, 171 - struct hmm_range *hmm_range) 171 + struct amdgpu_hmm_range *range) 172 172 { 173 173 unsigned long end; 174 174 unsigned long timeout; 175 175 unsigned long *pfns; 176 176 int r = 0; 177 + struct hmm_range *hmm_range = &range->hmm_range; 177 178 178 179 pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL); 179 180 if (unlikely(!pfns)) { ··· 227 226 return r; 228 227 } 229 228 230 - bool amdgpu_hmm_range_valid(struct hmm_range *hmm_range) 229 + bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range) 231 230 { 232 - if (!hmm_range) 231 + if (!range) 233 232 return false; 234 233 235 - return !mmu_interval_read_retry(hmm_range->notifier, 236 - hmm_range->notifier_seq); 234 + return !mmu_interval_read_retry(range->hmm_range.notifier, 235 + range->hmm_range.notifier_seq); 237 236 } 238 237 239 - struct hmm_range *amdgpu_hmm_range_alloc(void) 238 + struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo) 240 239 { 241 - return kzalloc(sizeof(struct hmm_range), GFP_KERNEL); 240 + struct amdgpu_hmm_range *range; 241 + 242 + range = kzalloc(sizeof(*range), GFP_KERNEL); 243 + if (!range) 244 + return NULL; 245 + 246 + range->bo = amdgpu_bo_ref(bo); 247 + return range; 242 248 } 243 249 244 - void amdgpu_hmm_range_free(struct hmm_range *hmm_range) 250 + void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range) 245 251 { 246 - if (!hmm_range) 252 + if (!range) 247 253 return; 248 254 249 - kvfree(hmm_range->hmm_pfns); 250 - kfree(hmm_range); 255 + kvfree(range->hmm_range.hmm_pfns); 256 + amdgpu_bo_unref(&range->bo); 257 + kfree(range); 251 258 }
+12 -7
drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
··· 31 31 #include <linux/interval_tree.h> 32 32 #include <linux/mmu_notifier.h> 33 33 34 + struct amdgpu_hmm_range { 35 + struct hmm_range hmm_range; 36 + struct amdgpu_bo *bo; 37 + }; 38 + 34 39 int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, 35 40 uint64_t start, uint64_t npages, bool readonly, 36 41 void *owner, 37 - struct hmm_range *hmm_range); 42 + struct amdgpu_hmm_range *range); 38 43 39 44 #if defined(CONFIG_HMM_MIRROR) 40 - bool amdgpu_hmm_range_valid(struct hmm_range *hmm_range); 41 - struct hmm_range *amdgpu_hmm_range_alloc(void); 42 - void amdgpu_hmm_range_free(struct hmm_range *hmm_range); 45 + bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range); 46 + struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo); 47 + void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range); 43 48 int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr); 44 49 void amdgpu_hmm_unregister(struct amdgpu_bo *bo); 45 50 #else ··· 57 52 58 53 static inline void amdgpu_hmm_unregister(struct amdgpu_bo *bo) {} 59 54 60 - static inline bool amdgpu_hmm_range_valid(struct hmm_range *hmm_range) 55 + static inline bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range) 61 56 { 62 57 return false; 63 58 } 64 59 65 - static inline struct hmm_range *amdgpu_hmm_range_alloc(void) 60 + static inline struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo) 66 61 { 67 62 return NULL; 68 63 } 69 64 70 - static inline void amdgpu_hmm_range_free(struct hmm_range *hmm_range) {} 65 + static inline void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range) {} 71 66 #endif 72 67 73 68 #endif
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 709 709 * that range is a valid memory and it is freed too. 710 710 */ 711 711 int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, 712 - struct hmm_range *range) 712 + struct amdgpu_hmm_range *range) 713 713 { 714 714 struct ttm_tt *ttm = bo->tbo.ttm; 715 715 struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); ··· 762 762 * that backs user memory and will ultimately be mapped into the device 763 763 * address space. 764 764 */ 765 - void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct hmm_range *range) 765 + void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct amdgpu_hmm_range *range) 766 766 { 767 767 unsigned long i; 768 768 769 769 for (i = 0; i < ttm->num_pages; ++i) 770 - ttm->pages[i] = range ? hmm_pfn_to_page(range->hmm_pfns[i]) : NULL; 770 + ttm->pages[i] = range ? hmm_pfn_to_page(range->hmm_range.hmm_pfns[i]) : NULL; 771 771 } 772 772 773 773 /*
+4 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
··· 28 28 #include <drm/gpu_scheduler.h> 29 29 #include <drm/ttm/ttm_placement.h> 30 30 #include "amdgpu_vram_mgr.h" 31 + #include "amdgpu_hmm.h" 31 32 32 33 #define AMDGPU_PL_GDS (TTM_PL_PRIV + 0) 33 34 #define AMDGPU_PL_GWS (TTM_PL_PRIV + 1) ··· 187 186 188 187 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR) 189 188 int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, 190 - struct hmm_range *range); 189 + struct amdgpu_hmm_range *range); 191 190 #else 192 191 static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, 193 - struct hmm_range *range) 192 + struct amdgpu_hmm_range *range) 194 193 { 195 194 return -EPERM; 196 195 } 197 196 #endif 198 197 199 - void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct hmm_range *range); 198 + void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct amdgpu_hmm_range *range); 200 199 int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo, 201 200 uint64_t *user_addr); 202 201 int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
-1
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
··· 21 21 * OTHER DEALINGS IN THE SOFTWARE. 22 22 */ 23 23 #include <linux/types.h> 24 - #include <linux/hmm.h> 25 24 #include <linux/dma-direction.h> 26 25 #include <linux/dma-mapping.h> 27 26 #include <linux/migrate.h>
-1
drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
··· 31 31 #include <linux/list.h> 32 32 #include <linux/mutex.h> 33 33 #include <linux/sched/mm.h> 34 - #include <linux/hmm.h> 35 34 #include "kfd_priv.h" 36 35 #include "kfd_svm.h" 37 36
+7 -7
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
··· 1698 1698 start = map_start << PAGE_SHIFT; 1699 1699 end = (map_last + 1) << PAGE_SHIFT; 1700 1700 for (addr = start; !r && addr < end; ) { 1701 - struct hmm_range *hmm_range = NULL; 1701 + struct amdgpu_hmm_range *range = NULL; 1702 1702 unsigned long map_start_vma; 1703 1703 unsigned long map_last_vma; 1704 1704 struct vm_area_struct *vma; ··· 1737 1737 } 1738 1738 1739 1739 WRITE_ONCE(p->svms.faulting_task, current); 1740 - hmm_range = amdgpu_hmm_range_alloc(); 1740 + range = amdgpu_hmm_range_alloc(NULL); 1741 1741 r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages, 1742 1742 readonly, owner, 1743 - hmm_range); 1743 + range); 1744 1744 WRITE_ONCE(p->svms.faulting_task, NULL); 1745 1745 if (r) { 1746 - amdgpu_hmm_range_free(hmm_range); 1746 + amdgpu_hmm_range_free(range); 1747 1747 pr_debug("failed %d to get svm range pages\n", r); 1748 1748 } 1749 1749 } else { ··· 1753 1753 if (!r) { 1754 1754 offset = (addr >> PAGE_SHIFT) - prange->start; 1755 1755 r = svm_range_dma_map(prange, ctx->bitmap, offset, npages, 1756 - hmm_range->hmm_pfns); 1756 + range->hmm_range.hmm_pfns); 1757 1757 if (r) 1758 1758 pr_debug("failed %d to dma map range\n", r); 1759 1759 } ··· 1764 1764 * Overrride return value to TRY AGAIN only if prior returns 1765 1765 * were successful 1766 1766 */ 1767 - if (hmm_range && !amdgpu_hmm_range_valid(hmm_range) && !r) { 1767 + if (range && !amdgpu_hmm_range_valid(range) && !r) { 1768 1768 pr_debug("hmm update the range, need validate again\n"); 1769 1769 r = -EAGAIN; 1770 1770 } 1771 1771 /* Free the hmm range */ 1772 - amdgpu_hmm_range_free(hmm_range); 1772 + amdgpu_hmm_range_free(range); 1773 1773 1774 1774 1775 1775 if (!r && !list_empty(&prange->child_list)) {
-1
drivers/gpu/drm/amd/amdkfd/kfd_svm.h
··· 31 31 #include <linux/list.h> 32 32 #include <linux/mutex.h> 33 33 #include <linux/sched/mm.h> 34 - #include <linux/hmm.h> 35 34 #include "amdgpu.h" 36 35 #include "kfd_priv.h" 37 36