Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2016 Red Hat
4 * Author: Rob Clark <robdclark@gmail.com>
5 */
6
7#include <linux/vmalloc.h>
8#include <linux/sched/mm.h>
9
10#include "msm_drv.h"
11#include "msm_gem.h"
12#include "msm_gpu.h"
13#include "msm_gpu_trace.h"
14
/* Eviction is enabled by default, but can be disabled via the
 * enable_eviction module parameter, e.g. for iommu combinations
 * where it needs more testing:
 */
18static bool enable_eviction = true;
19MODULE_PARM_DESC(enable_eviction, "Enable swappable GEM buffers");
20module_param(enable_eviction, bool, 0600);
21
22static bool can_swap(void)
23{
24 return enable_eviction && get_nr_swap_pages() > 0;
25}
26
27static bool can_block(struct shrink_control *sc)
28{
29 return (sc->gfp_mask & __GFP_DIRECT_RECLAIM) ||
30 (current_is_kswapd() && (sc->gfp_mask & __GFP_KSWAPD_RECLAIM));
31}
32
33static unsigned long
34msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
35{
36 struct msm_drm_private *priv = shrinker->private_data;
37 unsigned count = priv->lru.dontneed.count;
38
39 if (can_swap())
40 count += priv->lru.willneed.count;
41
42 return count;
43}
44
/*
 * Acquire the resv lock of every VM that @obj is mapped in (skipping VMs
 * that share the object's own resv), invoke @fn(obj) under those locks,
 * then unlock everything again.
 *
 * Returns true if all locks were acquired and @fn was called, false if
 * any lock could not be taken (in which case @fn is not called).
 */
static bool
with_vm_locks(struct ww_acquire_ctx *ticket,
	      void (*fn)(struct drm_gem_object *obj),
	      struct drm_gem_object *obj)
{
	/*
	 * Track last locked entry for unwinding locks in error and
	 * success paths
	 */
	struct drm_gpuvm_bo *vm_bo, *last_locked = NULL;
	int ret = 0;

	drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
		struct dma_resv *resv = drm_gpuvm_resv(vm_bo->vm);

		/* VMs sharing the object's resv need no separate lock: */
		if (resv == obj->resv)
			continue;

		ret = dma_resv_lock(resv, ticket);

		/*
		 * Since we already skip the case when the VM and obj
		 * share a resv (ie. _NO_SHARE objs), we don't expect
		 * to hit a double-locking scenario... which the lock
		 * unwinding cannot really cope with.
		 */
		WARN_ON(ret == -EALREADY);

		/*
		 * Don't bother with slow-lock / backoff / retry sequence,
		 * if we can't get the lock just give up and move on to
		 * the next object.
		 */
		if (ret)
			goto out_unlock;

		/*
		 * Hold a ref to prevent the vm_bo from being freed
		 * and removed from the obj's gpuva list, as that
		 * would result in missing the unlock below
		 */
		drm_gpuvm_bo_get(vm_bo);

		last_locked = vm_bo;
	}

	fn(obj);

out_unlock:
	if (last_locked) {
		/* Walk the list again, unlocking up to last_locked: */
		drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
			struct dma_resv *resv = drm_gpuvm_resv(vm_bo->vm);

			if (resv == obj->resv)
				continue;

			dma_resv_unlock(resv);

			/* Drop the ref taken while locking: */
			drm_gpuvm_bo_put(vm_bo);

			/* Stop once the last lock we took is released: */
			if (last_locked == vm_bo)
				break;
		}
	}

	return ret == 0;
}
113
114static bool
115purge(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket)
116{
117 if (!is_purgeable(to_msm_bo(obj)))
118 return false;
119
120 if (msm_gem_active(obj))
121 return false;
122
123 return with_vm_locks(ticket, msm_gem_purge, obj);
124}
125
126static bool
127evict(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket)
128{
129 if (is_unevictable(to_msm_bo(obj)))
130 return false;
131
132 if (msm_gem_active(obj))
133 return false;
134
135 return with_vm_locks(ticket, msm_gem_evict, obj);
136}
137
138static bool
139wait_for_idle(struct drm_gem_object *obj)
140{
141 enum dma_resv_usage usage = DMA_RESV_USAGE_BOOKKEEP;
142 return dma_resv_wait_timeout(obj->resv, usage, false, 10) > 0;
143}
144
145static bool
146active_purge(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket)
147{
148 if (!wait_for_idle(obj))
149 return false;
150
151 return purge(obj, ticket);
152}
153
154static bool
155active_evict(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket)
156{
157 if (!wait_for_idle(obj))
158 return false;
159
160 return evict(obj, ticket);
161}
162
/*
 * Shrinker scan callback: try to free up to sc->nr_to_scan objects,
 * running progressively more aggressive/expensive reclaim stages until
 * the quota is met or no stages remain.
 */
static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct msm_drm_private *priv = shrinker->private_data;
	struct ww_acquire_ctx ticket;
	struct {
		struct drm_gem_lru *lru;   /* LRU to scan in this stage */
		bool (*shrink)(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket);
		bool cond;                 /* stage is skipped when false */
		unsigned long freed;       /* zero-initialized below */
		unsigned long remaining;   /* zero-initialized below */
	} stages[] = {
		/* Stages of progressively more aggressive/expensive reclaim: */
		{ &priv->lru.dontneed, purge, true },
		{ &priv->lru.willneed, evict, can_swap() },
		{ &priv->lru.dontneed, active_purge, can_block(sc) },
		{ &priv->lru.willneed, active_evict, can_swap() && can_block(sc) },
	};
	long nr = sc->nr_to_scan;
	unsigned long freed = 0;
	unsigned long remaining = 0;

	for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) {
		if (!stages[i].cond)
			continue;
		stages[i].freed =
			drm_gem_lru_scan(stages[i].lru, nr,
					 &stages[i].remaining,
					 stages[i].shrink,
					 &ticket);
		/* Charge what this stage freed against the overall quota: */
		nr -= stages[i].freed;
		freed += stages[i].freed;
		remaining += stages[i].remaining;
	}

	if (freed) {
		/* Skipped/unreached stages report 0 (partial initializers
		 * zero the freed/remaining members):
		 */
		trace_msm_gem_shrink(sc->nr_to_scan, stages[0].freed,
				     stages[1].freed, stages[2].freed,
				     stages[3].freed);
	}

	return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;
}
206
#ifdef CONFIG_DEBUG_FS
/* Debugfs hook: drive the shrinker directly, asking it to scan up to
 * @nr_to_scan objects.  Returns the number freed, or SHRINK_STOP.
 */
unsigned long
msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
{
	struct msm_drm_private *priv = dev->dev_private;
	unsigned long freed = SHRINK_STOP;
	struct shrink_control sc = {
		.nr_to_scan = nr_to_scan,
	};

	fs_reclaim_acquire(GFP_KERNEL);
	if (priv->shrinker)
		freed = msm_gem_shrinker_scan(priv->shrinker, &sc);
	fs_reclaim_release(GFP_KERNEL);

	return freed;
}
#endif
225
/* Since we don't know any better, let's bail after unmapping a few
 * objects; if necessary the shrinker will be invoked again.
 * Seems better than unmapping *everything*.
 */
230static const int vmap_shrink_limit = 15;
231
232static bool
233vmap_shrink(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket)
234{
235 if (!is_vunmapable(to_msm_bo(obj)))
236 return false;
237
238 msm_gem_vunmap(obj);
239
240 return true;
241}
242
243static int
244msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
245{
246 struct msm_drm_private *priv =
247 container_of(nb, struct msm_drm_private, vmap_notifier);
248 struct drm_gem_lru *lrus[] = {
249 &priv->lru.dontneed,
250 &priv->lru.willneed,
251 &priv->lru.pinned,
252 NULL,
253 };
254 unsigned idx, unmapped = 0;
255 unsigned long remaining = 0;
256
257 for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {
258 unmapped += drm_gem_lru_scan(lrus[idx],
259 vmap_shrink_limit - unmapped,
260 &remaining,
261 vmap_shrink,
262 NULL);
263 }
264
265 *(unsigned long *)ptr += unmapped;
266
267 if (unmapped > 0)
268 trace_msm_gem_purge_vmaps(unmapped);
269
270 return NOTIFY_DONE;
271}
272
273/**
274 * msm_gem_shrinker_init - Initialize msm shrinker
275 * @dev: drm device
276 *
277 * This function registers and sets up the msm shrinker.
278 */
279int msm_gem_shrinker_init(struct drm_device *dev)
280{
281 struct msm_drm_private *priv = dev->dev_private;
282
283 priv->shrinker = shrinker_alloc(0, "drm-msm_gem");
284 if (!priv->shrinker)
285 return -ENOMEM;
286
287 priv->shrinker->count_objects = msm_gem_shrinker_count;
288 priv->shrinker->scan_objects = msm_gem_shrinker_scan;
289 priv->shrinker->private_data = priv;
290
291 shrinker_register(priv->shrinker);
292
293 priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
294 WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
295
296 return 0;
297}
298
299/**
300 * msm_gem_shrinker_cleanup - Clean up msm shrinker
301 * @dev: drm device
302 *
303 * This function unregisters the msm shrinker.
304 */
305void msm_gem_shrinker_cleanup(struct drm_device *dev)
306{
307 struct msm_drm_private *priv = dev->dev_private;
308
309 if (priv->shrinker) {
310 WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
311 shrinker_free(priv->shrinker);
312 }
313}