/*
 * linux/drivers/video/fb_defio.c
 *
 * Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>

/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>

struct address_space;

/*
 * struct fb_deferred_io_state
 */

struct fb_deferred_io_state {
	struct kref ref;

	int open_count; /* number of opened files; protected by fb_info lock */
	struct address_space *mapping; /* page cache object for fb device */

	struct mutex lock; /* mutex that protects the pageref list */
	/* fields protected by lock */
	struct fb_info *info;
	struct list_head pagereflist; /* list of pagerefs for touched pages */
	unsigned long npagerefs;
	struct fb_deferred_io_pageref *pagerefs;
};
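
/*
 * The state is reference-counted because userspace mappings can outlive
 * the framebuffer device: each mmap'ed VMA holds a reference (taken in
 * fb_deferred_io_mmap() and in the VMA open callback) in addition to the
 * reference held by the fb_info from init to cleanup. Once the device is
 * gone, fbdefio_state->info is NULL and the fault handlers below return
 * VM_FAULT_SIGBUS instead of touching freed driver state.
 */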

static struct fb_deferred_io_state *fb_deferred_io_state_alloc(unsigned long len)
{
	struct fb_deferred_io_state *fbdefio_state;
	struct fb_deferred_io_pageref *pagerefs;
	unsigned long npagerefs;

	fbdefio_state = kzalloc(sizeof(*fbdefio_state), GFP_KERNEL);
	if (!fbdefio_state)
		return NULL;

	npagerefs = DIV_ROUND_UP(len, PAGE_SIZE);

	/* alloc a page ref for each page of the display memory */
	pagerefs = kvcalloc(npagerefs, sizeof(*pagerefs), GFP_KERNEL);
	if (!pagerefs)
		goto err_kfree;
	fbdefio_state->npagerefs = npagerefs;
	fbdefio_state->pagerefs = pagerefs;

	kref_init(&fbdefio_state->ref);
	mutex_init(&fbdefio_state->lock);

	INIT_LIST_HEAD(&fbdefio_state->pagereflist);

	return fbdefio_state;

err_kfree:
	kfree(fbdefio_state);
	return NULL;
}

static void fb_deferred_io_state_release(struct fb_deferred_io_state *fbdefio_state)
{
	WARN_ON(!list_empty(&fbdefio_state->pagereflist));
	mutex_destroy(&fbdefio_state->lock);
	kvfree(fbdefio_state->pagerefs);

	kfree(fbdefio_state);
}

static void fb_deferred_io_state_get(struct fb_deferred_io_state *fbdefio_state)
{
	kref_get(&fbdefio_state->ref);
}

static void __fb_deferred_io_state_release(struct kref *ref)
{
	struct fb_deferred_io_state *fbdefio_state =
		container_of(ref, struct fb_deferred_io_state, ref);

	fb_deferred_io_state_release(fbdefio_state);
}

static void fb_deferred_io_state_put(struct fb_deferred_io_state *fbdefio_state)
{
	kref_put(&fbdefio_state->ref, __fb_deferred_io_state_release);
}

/*
 * struct vm_operations_struct
 */

static void fb_deferred_io_vm_open(struct vm_area_struct *vma)
{
	struct fb_deferred_io_state *fbdefio_state = vma->vm_private_data;

	WARN_ON_ONCE(!try_module_get(THIS_MODULE));
	fb_deferred_io_state_get(fbdefio_state);
}

static void fb_deferred_io_vm_close(struct vm_area_struct *vma)
{
	struct fb_deferred_io_state *fbdefio_state = vma->vm_private_data;

	fb_deferred_io_state_put(fbdefio_state);
	module_put(THIS_MODULE);
}

static struct page *fb_deferred_io_get_page(struct fb_info *info, unsigned long offs)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	const void *screen_buffer = info->screen_buffer;
	struct page *page = NULL;

	if (fbdefio->get_page)
		return fbdefio->get_page(info, offs);

	if (is_vmalloc_addr(screen_buffer + offs))
		page = vmalloc_to_page(screen_buffer + offs);
	else if (info->fix.smem_start)
		page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);

	if (page)
		get_page(page);

	return page;
}
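
/*
 * Note that the get_page callback, if set, must return the page with a
 * reference already taken, matching the get_page() call in the generic
 * path above. A sketch of a driver-provided callback for display memory
 * that is neither vmalloc'ed nor contiguous at fix.smem_start might look
 * as follows; example_par and its pages[] array are hypothetical driver
 * state, not part of this API:
 *
 *	static struct page *example_get_page(struct fb_info *info,
 *					     unsigned long offs)
 *	{
 *		struct example_par *par = info->par;
 *		struct page *page = par->pages[offs >> PAGE_SHIFT];
 *
 *		get_page(page);
 *		return page;
 *	}
 */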

static struct fb_deferred_io_pageref *
fb_deferred_io_pageref_lookup(struct fb_deferred_io_state *fbdefio_state, unsigned long offset,
			      struct page *page)
{
	struct fb_info *info = fbdefio_state->info;
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct fb_deferred_io_pageref *pageref;

	if (fb_WARN_ON_ONCE(info, pgoff >= fbdefio_state->npagerefs))
		return NULL; /* incorrect allocation size */

	/* 1:1 mapping between pageref and page offset */
	pageref = &fbdefio_state->pagerefs[pgoff];

	if (pageref->page)
		goto out;

	pageref->page = page;
	pageref->offset = pgoff << PAGE_SHIFT;
	INIT_LIST_HEAD(&pageref->list);

out:
	if (fb_WARN_ON_ONCE(info, pageref->page != page))
		return NULL; /* inconsistent state */
	return pageref;
}

static struct fb_deferred_io_pageref *fb_deferred_io_pageref_get(struct fb_info *info,
								  unsigned long offset,
								  struct page *page)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct fb_deferred_io_state *fbdefio_state = info->fbdefio_state;
	struct list_head *pos = &fbdefio_state->pagereflist;
	struct fb_deferred_io_pageref *pageref, *cur;

	pageref = fb_deferred_io_pageref_lookup(fbdefio_state, offset, page);
	if (!pageref)
		return NULL;

	/*
	 * This check is to catch the case where a new process could start
	 * writing to the same page through a new PTE. This new access
	 * can cause a call to .page_mkwrite even if the original process'
	 * PTE is marked writable.
	 */
	if (!list_empty(&pageref->list))
		goto pageref_already_added;

	if (unlikely(fbdefio->sort_pagereflist)) {
		/*
		 * We loop through the list of pagerefs before adding in
		 * order to keep the pagerefs sorted. This has significant
		 * overhead of O(n^2) with n being the number of written
		 * pages. If possible, drivers should try to work with
		 * unsorted page lists instead.
		 */
		list_for_each_entry(cur, &fbdefio_state->pagereflist, list) {
			if (cur->offset > pageref->offset)
				break;
		}
		pos = &cur->list;
	}

	list_add_tail(&pageref->list, pos);

pageref_already_added:
	return pageref;
}

static void fb_deferred_io_pageref_put(struct fb_deferred_io_pageref *pageref,
				       struct fb_info *info)
{
	list_del_init(&pageref->list);
}

/* this is to find and return the vmalloc-ed fb pages */
static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
{
	struct fb_info *info;
	unsigned long offset;
	struct page *page;
	vm_fault_t ret;
	struct fb_deferred_io_state *fbdefio_state = vmf->vma->vm_private_data;

	mutex_lock(&fbdefio_state->lock);

	info = fbdefio_state->info;
	if (!info) {
		ret = VM_FAULT_SIGBUS; /* our device is gone */
		goto err_mutex_unlock;
	}

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= info->fix.smem_len) {
		ret = VM_FAULT_SIGBUS;
		goto err_mutex_unlock;
	}

	page = fb_deferred_io_get_page(info, offset);
	if (!page) {
		ret = VM_FAULT_SIGBUS;
		goto err_mutex_unlock;
	}

	if (!vmf->vma->vm_file)
		fb_err(info, "no mapping available\n");

	fb_WARN_ON_ONCE(info, !fbdefio_state->mapping);

	mutex_unlock(&fbdefio_state->lock);

	vmf->page = page;

	return 0;

err_mutex_unlock:
	mutex_unlock(&fbdefio_state->lock);
	return ret;
}

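/*
 * With deferred I/O compiled in, the fbdev core installs this as the
 * .fsync file operation for /dev/fb*. Flushing the delayed work ensures
 * that all pages dirtied so far have been handed to the driver's
 * deferred_io callback before fsync() returns to userspace.
 */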
int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct fb_info *info = file->private_data;
	struct inode *inode = file_inode(file);
	int err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	/* Skip if deferred io is compiled-in but disabled on this fbdev */
	if (!info->fbdefio)
		return 0;

	inode_lock(inode);
	flush_delayed_work(&info->deferred_work);
	inode_unlock(inode);

	return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);

/*
 * Adds a page to the dirty list. Call this from struct
 * vm_operations_struct.page_mkwrite.
 */
static vm_fault_t fb_deferred_io_track_page(struct fb_deferred_io_state *fbdefio_state,
					    unsigned long offset, struct page *page)
{
	struct fb_info *info;
	struct fb_deferred_io *fbdefio;
	struct fb_deferred_io_pageref *pageref;
	vm_fault_t ret;

	/* protect against the workqueue changing the page list */
	mutex_lock(&fbdefio_state->lock);

	info = fbdefio_state->info;
	if (!info) {
		ret = VM_FAULT_SIGBUS; /* our device is gone */
		goto err_mutex_unlock;
	}

	fbdefio = info->fbdefio;

	pageref = fb_deferred_io_pageref_get(info, offset, page);
	if (WARN_ON_ONCE(!pageref)) {
		ret = VM_FAULT_OOM;
		goto err_mutex_unlock;
	}

	/*
	 * We want the page to remain locked from ->page_mkwrite until
	 * the PTE is marked dirty to avoid mapping_wrprotect_range()
	 * being called before the PTE is updated, which would leave
	 * the page ignored by defio.
	 * Do this by locking the page here and informing the caller
	 * about it with VM_FAULT_LOCKED.
	 */
	lock_page(pageref->page);

	mutex_unlock(&fbdefio_state->lock);

	/* come back after delay to process the deferred IO */
	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
	return VM_FAULT_LOCKED;

err_mutex_unlock:
	mutex_unlock(&fbdefio_state->lock);
	return ret;
}

static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_deferred_io_state *fbdefio_state,
					      struct vm_fault *vmf)
{
	unsigned long offset = vmf->pgoff << PAGE_SHIFT;
	struct page *page = vmf->page;

	file_update_time(vmf->vma->vm_file);

	return fb_deferred_io_track_page(fbdefio_state, offset, page);
}

static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
{
	struct fb_deferred_io_state *fbdefio_state = vmf->vma->vm_private_data;

	return fb_deferred_io_page_mkwrite(fbdefio_state, vmf);
}

static const struct vm_operations_struct fb_deferred_io_vm_ops = {
	.open = fb_deferred_io_vm_open,
	.close = fb_deferred_io_vm_close,
	.fault = fb_deferred_io_fault,
	.page_mkwrite = fb_deferred_io_mkwrite,
};

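/*
 * Dirty tracking happens through the pageref list rather than the page
 * cache, so the mapping only needs a dirty_folio method for address
 * spaces that use neither buffer heads nor writeback; noop_dirty_folio
 * is exactly that.
 */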
static const struct address_space_operations fb_deferred_io_aops = {
	.dirty_folio = noop_dirty_folio,
};

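/*
 * Drivers typically install this as their struct fb_ops.fb_mmap, e.g.
 * via the FB_GEN_DEFAULT_DEFERRED_*_OPS helper macros. The module
 * reference taken here is balanced by the module_put() in
 * fb_deferred_io_vm_close() when the mapping goes away.
 */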
int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	if (!try_module_get(THIS_MODULE))
		return -EINVAL;

	vma->vm_ops = &fb_deferred_io_vm_ops;
	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
	if (!(info->flags & FBINFO_VIRTFB))
		vm_flags_set(vma, VM_IO);
	vma->vm_private_data = info->fbdefio_state;

	fb_deferred_io_state_get(info->fbdefio_state); /* released in vma->vm_ops->close() */

	return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_mmap);

/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
	struct fb_info *info = container_of(work, struct fb_info, deferred_work.work);
	struct fb_deferred_io_pageref *pageref, *next;
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct fb_deferred_io_state *fbdefio_state = info->fbdefio_state;

	/* here we wrprotect the page's mappings, then do all deferred IO. */
	mutex_lock(&fbdefio_state->lock);
#ifdef CONFIG_MMU
	list_for_each_entry(pageref, &fbdefio_state->pagereflist, list) {
		struct page *page = pageref->page;
		pgoff_t pgoff = pageref->offset >> PAGE_SHIFT;

		mapping_wrprotect_range(fbdefio_state->mapping, pgoff,
					page_to_pfn(page), 1);
	}
#endif

	/* driver's callback with pagereflist */
	fbdefio->deferred_io(info, &fbdefio_state->pagereflist);

	/* clear the list */
	list_for_each_entry_safe(pageref, next, &fbdefio_state->pagereflist, list)
		fb_deferred_io_pageref_put(pageref, info);

	mutex_unlock(&fbdefio_state->lock);
}
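
/*
 * A sketch (not part of this file) of the driver side: the deferred_io
 * callback runs with the pageref list held stable under the state lock
 * and typically walks the list to flush each dirty page to the device.
 * example_flush_page() is a hypothetical driver helper:
 *
 *	static void example_deferred_io(struct fb_info *info,
 *					struct list_head *pagereflist)
 *	{
 *		struct fb_deferred_io_pageref *pageref;
 *
 *		list_for_each_entry(pageref, pagereflist, list)
 *			example_flush_page(info, pageref->offset);
 *	}
 */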

int fb_deferred_io_init(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct fb_deferred_io_state *fbdefio_state;

	BUG_ON(!fbdefio);

	if (WARN_ON(!info->fix.smem_len))
		return -EINVAL;

	fbdefio_state = fb_deferred_io_state_alloc(info->fix.smem_len);
	if (!fbdefio_state)
		return -ENOMEM;
	fbdefio_state->info = info;

	INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
	if (fbdefio->delay == 0) /* set a default of 1 s */
		fbdefio->delay = HZ;

	info->fbdefio_state = fbdefio_state;

	return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);
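
/*
 * Typical driver bring-up, sketched with hypothetical example_* names:
 * point info->fbdefio at a struct fb_deferred_io before registering the
 * framebuffer, and tear down in reverse order on removal.
 *
 *	static struct fb_deferred_io example_defio = {
 *		.delay		= HZ / 20,
 *		.deferred_io	= example_deferred_io,
 *	};
 *
 *	info->fbdefio = &example_defio;
 *	ret = fb_deferred_io_init(info);
 *	if (ret)
 *		return ret;
 *	ret = register_framebuffer(info);
 *	...
 *	unregister_framebuffer(info);
 *	fb_deferred_io_cleanup(info);
 */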

void fb_deferred_io_open(struct fb_info *info,
			 struct inode *inode,
			 struct file *file)
{
	struct fb_deferred_io_state *fbdefio_state = info->fbdefio_state;

	fbdefio_state->mapping = file->f_mapping;
	file->f_mapping->a_ops = &fb_deferred_io_aops;
	fbdefio_state->open_count++;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);

static void fb_deferred_io_lastclose(struct fb_info *info)
{
	flush_delayed_work(&info->deferred_work);
}

void fb_deferred_io_release(struct fb_info *info)
{
	struct fb_deferred_io_state *fbdefio_state = info->fbdefio_state;

	if (!--fbdefio_state->open_count)
		fb_deferred_io_lastclose(info);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_release);

void fb_deferred_io_cleanup(struct fb_info *info)
{
	struct fb_deferred_io_state *fbdefio_state = info->fbdefio_state;

	fb_deferred_io_lastclose(info);

	info->fbdefio_state = NULL;

	mutex_lock(&fbdefio_state->lock);
	fbdefio_state->info = NULL;
	mutex_unlock(&fbdefio_state->lock);

	fb_deferred_io_state_put(fbdefio_state);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);