// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/checkpoint.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
#include <linux/folio_batch.h>
#include <linux/swap.h>
#include <linux/kthread.h>
#include <linux/delayacct.h>
#include <linux/ioprio.h>
#include <linux/math64.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

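/*
 * Snapshot wall-clock time plus the per-task scheduler and delay-accounting
 * counters, so that a lock hold time can later be broken down into running,
 * runnable and I/O-sleep components. Fields that depend on disabled config
 * options are simply left untouched.
 */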
static inline void get_lock_elapsed_time(struct f2fs_time_stat *ts)
{
	ts->total_time = ktime_get();
#ifdef CONFIG_64BIT
	ts->running_time = current->se.sum_exec_runtime;
#endif
#if defined(CONFIG_SCHED_INFO) && defined(CONFIG_SCHEDSTATS)
	ts->runnable_time = current->sched_info.run_delay;
#endif
#ifdef CONFIG_TASK_DELAY_ACCT
	if (current->delays)
		ts->io_sleep_time = current->delays->blkio_delay;
#endif
}

static inline void trace_lock_elapsed_time_start(struct f2fs_rwsem *sem,
					struct f2fs_lock_context *lc)
{
	lc->lock_trace = trace_f2fs_lock_elapsed_time_enabled();
	if (!lc->lock_trace)
		return;

	get_lock_elapsed_time(&lc->ts);
}

static inline void trace_lock_elapsed_time_end(struct f2fs_rwsem *sem,
				struct f2fs_lock_context *lc, bool is_write)
{
	struct f2fs_time_stat tts;
	unsigned long long total_time;
	unsigned long long running_time = 0;
	unsigned long long runnable_time = 0;
	unsigned long long io_sleep_time = 0;
	unsigned long long other_time = 0;
	unsigned npm = NSEC_PER_MSEC;

	if (!lc->lock_trace)
		return;

	if (time_to_inject(sem->sbi, FAULT_LOCK_TIMEOUT))
		f2fs_schedule_timeout_killable(DEFAULT_FAULT_TIMEOUT, true);

	get_lock_elapsed_time(&tts);

	total_time = div_u64(tts.total_time - lc->ts.total_time, npm);
	if (total_time <= sem->sbi->max_lock_elapsed_time)
		return;

#ifdef CONFIG_64BIT
	running_time = div_u64(tts.running_time - lc->ts.running_time, npm);
#endif
#if defined(CONFIG_SCHED_INFO) && defined(CONFIG_SCHEDSTATS)
	runnable_time = div_u64(tts.runnable_time - lc->ts.runnable_time, npm);
#endif
#ifdef CONFIG_TASK_DELAY_ACCT
	io_sleep_time = div_u64(tts.io_sleep_time - lc->ts.io_sleep_time, npm);
#endif
	if (total_time > running_time + io_sleep_time + runnable_time)
		other_time = total_time - running_time -
					io_sleep_time - runnable_time;

	trace_f2fs_lock_elapsed_time(sem->sbi, sem->name, is_write, current,
			get_current_ioprio(), total_time, running_time,
			runnable_time, io_sleep_time, other_time);
}

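/*
 * adjust_lock_priority is a per-lock-name bitmask: bit (name - 1) enables
 * priority uplifting for that f2fs_rwsem.
 */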
static bool need_uplift_priority(struct f2fs_rwsem *sem, bool is_write)
{
	if (!(sem->sbi->adjust_lock_priority & BIT(sem->name - 1)))
		return false;

	switch (sem->name) {
	/*
	 * The writer is checkpoint, which already runs at high priority,
	 * so only uplift the priority of readers.
	 */
	case LOCK_NAME_CP_RWSEM:
	case LOCK_NAME_NODE_CHANGE:
	case LOCK_NAME_NODE_WRITE:
		return !is_write;
	case LOCK_NAME_GC_LOCK:
	case LOCK_NAME_CP_GLOBAL:
	case LOCK_NAME_IO_RWSEM:
		return true;
	default:
		f2fs_bug_on(sem->sbi, 1);
	}
	return false;
}

static void uplift_priority(struct f2fs_rwsem *sem, struct f2fs_lock_context *lc,
			bool is_write)
{
	lc->need_restore = false;
	if (!sem->sbi->adjust_lock_priority)
		return;
	if (rt_task(current))
		return;
	if (!need_uplift_priority(sem, is_write))
		return;
	lc->orig_nice = task_nice(current);
	lc->new_nice = PRIO_TO_NICE(sem->sbi->lock_duration_priority);
	if (lc->orig_nice <= lc->new_nice)
		return;
	set_user_nice(current, lc->new_nice);
	lc->need_restore = true;

	trace_f2fs_priority_uplift(sem->sbi, sem->name, is_write, current,
		NICE_TO_PRIO(lc->orig_nice), NICE_TO_PRIO(lc->new_nice));
}

static void restore_priority(struct f2fs_rwsem *sem, struct f2fs_lock_context *lc,
			bool is_write)
{
	if (!lc->need_restore)
		return;
	/* someone has updated the priority */
	if (task_nice(current) != lc->new_nice)
		return;
	set_user_nice(current, lc->orig_nice);

	trace_f2fs_priority_restore(sem->sbi, sem->name, is_write, current,
		NICE_TO_PRIO(lc->orig_nice), NICE_TO_PRIO(lc->new_nice));
}

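/*
 * Lock wrappers: uplift the task's priority before acquiring the rwsem,
 * restore it after release (or after a failed trylock), and record the
 * elapsed time between acquire and release for tracing.
 */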
void f2fs_down_read_trace(struct f2fs_rwsem *sem, struct f2fs_lock_context *lc)
{
	uplift_priority(sem, lc, false);
	f2fs_down_read(sem);
	trace_lock_elapsed_time_start(sem, lc);
}

int f2fs_down_read_trylock_trace(struct f2fs_rwsem *sem, struct f2fs_lock_context *lc)
{
	uplift_priority(sem, lc, false);
	if (!f2fs_down_read_trylock(sem)) {
		restore_priority(sem, lc, false);
		return 0;
	}
	trace_lock_elapsed_time_start(sem, lc);
	return 1;
}

void f2fs_up_read_trace(struct f2fs_rwsem *sem, struct f2fs_lock_context *lc)
{
	f2fs_up_read(sem);
	restore_priority(sem, lc, false);
	trace_lock_elapsed_time_end(sem, lc, false);
}

void f2fs_down_write_trace(struct f2fs_rwsem *sem, struct f2fs_lock_context *lc)
{
	uplift_priority(sem, lc, true);
	f2fs_down_write(sem);
	trace_lock_elapsed_time_start(sem, lc);
}

int f2fs_down_write_trylock_trace(struct f2fs_rwsem *sem, struct f2fs_lock_context *lc)
{
	uplift_priority(sem, lc, true);
	if (!f2fs_down_write_trylock(sem)) {
		restore_priority(sem, lc, true);
		return 0;
	}
	trace_lock_elapsed_time_start(sem, lc);
	return 1;
}

void f2fs_up_write_trace(struct f2fs_rwsem *sem, struct f2fs_lock_context *lc)
{
	f2fs_up_write(sem);
	restore_priority(sem, lc, true);
	trace_lock_elapsed_time_end(sem, lc, true);
}

void f2fs_lock_op(struct f2fs_sb_info *sbi, struct f2fs_lock_context *lc)
{
	f2fs_down_read_trace(&sbi->cp_rwsem, lc);
}

int f2fs_trylock_op(struct f2fs_sb_info *sbi, struct f2fs_lock_context *lc)
{
	if (time_to_inject(sbi, FAULT_LOCK_OP))
		return 0;

	return f2fs_down_read_trylock_trace(&sbi->cp_rwsem, lc);
}

void f2fs_unlock_op(struct f2fs_sb_info *sbi, struct f2fs_lock_context *lc)
{
	f2fs_up_read_trace(&sbi->cp_rwsem, lc);
}

static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
{
	f2fs_down_write(&sbi->cp_rwsem);
}

static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
{
	f2fs_up_write(&sbi->cp_rwsem);
}

#define DEFAULT_CHECKPOINT_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 3))

static struct kmem_cache *ino_entry_slab;
struct kmem_cache *f2fs_inode_entry_slab;

/*
 * We guarantee no failure on the returned folio.
 */
struct folio *f2fs_grab_meta_folio(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct address_space *mapping = META_MAPPING(sbi);
	struct folio *folio;
repeat:
	folio = f2fs_grab_cache_folio(mapping, index, false);
	if (IS_ERR(folio)) {
		cond_resched();
		goto repeat;
	}
	f2fs_folio_wait_writeback(folio, META, true, true);
	if (!folio_test_uptodate(folio))
		folio_mark_uptodate(folio);
	return folio;
}

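/*
 * Read one meta block into the meta inode's page cache. For POR (power-off
 * recovery) callers, is_por is set and REQ_META is dropped from the bio.
 */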
static struct folio *__get_meta_folio(struct f2fs_sb_info *sbi, pgoff_t index,
							bool is_meta)
{
	struct address_space *mapping = META_MAPPING(sbi);
	struct folio *folio;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		.op = REQ_OP_READ,
		.op_flags = REQ_META | REQ_PRIO,
		.old_blkaddr = index,
		.new_blkaddr = index,
		.encrypted_page = NULL,
		.is_por = !is_meta ? 1 : 0,
	};
	int err;

	if (unlikely(!is_meta))
		fio.op_flags &= ~REQ_META;
repeat:
	folio = f2fs_grab_cache_folio(mapping, index, false);
	if (IS_ERR(folio)) {
		cond_resched();
		goto repeat;
	}
	if (folio_test_uptodate(folio))
		goto out;

	fio.folio = folio;

	err = f2fs_submit_page_bio(&fio);
	if (err) {
		f2fs_folio_put(folio, true);
		return ERR_PTR(err);
	}

	f2fs_update_iostat(sbi, NULL, FS_META_READ_IO, F2FS_BLKSIZE);

	folio_lock(folio);
	if (unlikely(!is_meta_folio(folio))) {
		f2fs_folio_put(folio, true);
		goto repeat;
	}

	if (unlikely(!folio_test_uptodate(folio))) {
		f2fs_handle_page_eio(sbi, folio, META);
		f2fs_folio_put(folio, true);
		return ERR_PTR(-EIO);
	}
out:
	return folio;
}

struct folio *f2fs_get_meta_folio(struct f2fs_sb_info *sbi, pgoff_t index)
{
	return __get_meta_folio(sbi, index, true);
}

struct folio *f2fs_get_meta_folio_retry(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct folio *folio;
	int count = 0;

retry:
	folio = __get_meta_folio(sbi, index, true);
	if (IS_ERR(folio)) {
		if (PTR_ERR(folio) == -EIO &&
				++count <= DEFAULT_RETRY_IO_COUNT)
			goto retry;
		f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_META_PAGE);
	}
	return folio;
}

/* for POR only */
struct folio *f2fs_get_tmp_folio(struct f2fs_sb_info *sbi, pgoff_t index)
{
	return __get_meta_folio(sbi, index, false);
}

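/*
 * Cross-check a data block address against the SIT valid-block bitmap of
 * its segment; a mismatch with what the caller expects (ENHANCE vs.
 * ENHANCE_UPDATE) indicates an on-disk inconsistency and requests fsck.
 */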
static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr,
					int type)
{
	struct seg_entry *se;
	unsigned int segno, offset;
	bool exist;

	if (type == DATA_GENERIC)
		return true;

	segno = GET_SEGNO(sbi, blkaddr);
	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	se = get_seg_entry(sbi, segno);

	exist = f2fs_test_bit(offset, se->cur_valid_map);

	/* skip data if we already have an error in checkpoint. */
	if (unlikely(f2fs_cp_error(sbi)))
		return exist;

	if ((exist && type == DATA_GENERIC_ENHANCE_UPDATE) ||
		(!exist && type == DATA_GENERIC_ENHANCE))
		goto out_err;
	if (!exist && type != DATA_GENERIC_ENHANCE_UPDATE)
		goto out_handle;
	return exist;

out_err:
	f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d",
		 blkaddr, exist);
	set_sbi_flag(sbi, SBI_NEED_FSCK);
	dump_stack();
out_handle:
	f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
	return exist;
}

static bool __f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	switch (type) {
	case META_NAT:
		break;
	case META_SIT:
		if (unlikely(blkaddr >= SIT_BLK_CNT(sbi)))
			goto check_only;
		break;
	case META_SSA:
		if (unlikely(blkaddr >= MAIN_BLKADDR(sbi) ||
			blkaddr < SM_I(sbi)->ssa_blkaddr))
			goto check_only;
		break;
	case META_CP:
		if (unlikely(blkaddr >= SIT_I(sbi)->sit_base_addr ||
			blkaddr < __start_cp_addr(sbi)))
			goto check_only;
		break;
	case META_POR:
		if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
			blkaddr < MAIN_BLKADDR(sbi)))
			goto check_only;
		break;
	case DATA_GENERIC:
	case DATA_GENERIC_ENHANCE:
	case DATA_GENERIC_ENHANCE_READ:
	case DATA_GENERIC_ENHANCE_UPDATE:
		if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
				blkaddr < MAIN_BLKADDR(sbi))) {

			/* Skip emitting an error message. */
			if (unlikely(f2fs_cp_error(sbi)))
				return false;

			f2fs_warn(sbi, "access invalid blkaddr:%u",
				  blkaddr);
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			dump_stack();
			goto err;
		} else {
			return __is_bitmap_valid(sbi, blkaddr, type);
		}
		break;
	case META_GENERIC:
		if (unlikely(blkaddr < SEG0_BLKADDR(sbi) ||
			blkaddr >= MAIN_BLKADDR(sbi)))
			goto err;
		break;
	default:
		BUG();
	}

	return true;
err:
	f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
check_only:
	return false;
}

bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	if (time_to_inject(sbi, FAULT_BLKADDR_VALIDITY))
		return false;
	return __f2fs_is_valid_blkaddr(sbi, blkaddr, type);
}

bool f2fs_is_valid_blkaddr_raw(struct f2fs_sb_info *sbi,
			block_t blkaddr, int type)
{
	return __f2fs_is_valid_blkaddr(sbi, blkaddr, type);
}


/*
 * Readahead CP/NAT/SIT/SSA/POR pages
 */
int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
							int type, bool sync)
{
	block_t blkno = start;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		.op = REQ_OP_READ,
		.op_flags = sync ? (REQ_META | REQ_PRIO) : REQ_RAHEAD,
		.encrypted_page = NULL,
		.in_list = 0,
		.is_por = (type == META_POR) ? 1 : 0,
	};
	struct blk_plug plug;
	int err;

	if (unlikely(type == META_POR))
		fio.op_flags &= ~REQ_META;

	blk_start_plug(&plug);
	for (; nrpages-- > 0; blkno++) {
		struct folio *folio;

		if (!f2fs_is_valid_blkaddr(sbi, blkno, type))
			goto out;

		switch (type) {
		case META_NAT:
			if (unlikely(blkno >=
					NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid)))
				blkno = 0;
			/* get nat block addr */
			fio.new_blkaddr = current_nat_addr(sbi,
					blkno * NAT_ENTRY_PER_BLOCK);
			break;
		case META_SIT:
			if (unlikely(blkno >= TOTAL_SEGS(sbi)))
				goto out;
			/* get sit block addr */
			fio.new_blkaddr = current_sit_addr(sbi,
					blkno * SIT_ENTRY_PER_BLOCK);
			break;
		case META_SSA:
		case META_CP:
		case META_POR:
			fio.new_blkaddr = blkno;
			break;
		default:
			BUG();
		}

		folio = f2fs_grab_cache_folio(META_MAPPING(sbi),
						fio.new_blkaddr, false);
		if (IS_ERR(folio))
			continue;
		if (folio_test_uptodate(folio)) {
			f2fs_folio_put(folio, true);
			continue;
		}

		fio.folio = folio;
		err = f2fs_submit_page_bio(&fio);
		f2fs_folio_put(folio, err ? true : false);

		if (!err)
			f2fs_update_iostat(sbi, NULL, FS_META_READ_IO,
							F2FS_BLKSIZE);
	}
out:
	blk_finish_plug(&plug);
	return blkno - start;
}

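/*
 * Conditionally readahead recovery (POR) blocks around @index: only issue
 * the readahead when the block at @index is not already cached up to date.
 */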
void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index,
							unsigned int ra_blocks)
{
	struct folio *folio;
	bool readahead = false;

	if (ra_blocks == RECOVERY_MIN_RA_BLOCKS)
		return;

	folio = filemap_get_folio(META_MAPPING(sbi), index);
	if (IS_ERR(folio) || !folio_test_uptodate(folio))
		readahead = true;
	f2fs_folio_put(folio, false);

	if (readahead)
		f2fs_ra_meta_pages(sbi, index, ra_blocks, META_POR, true);
}

static bool __f2fs_write_meta_folio(struct folio *folio,
				struct writeback_control *wbc,
				enum iostat_type io_type)
{
	struct f2fs_sb_info *sbi = F2FS_F_SB(folio);

	trace_f2fs_writepage(folio, META);

	if (unlikely(f2fs_cp_error(sbi))) {
		if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) {
			folio_clear_uptodate(folio);
			dec_page_count(sbi, F2FS_DIRTY_META);
			folio_unlock(folio);
			return true;
		}
		goto redirty_out;
	}
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	f2fs_do_write_meta_page(sbi, folio, io_type);
	dec_page_count(sbi, F2FS_DIRTY_META);

	folio_unlock(folio);

	if (unlikely(f2fs_cp_error(sbi)))
		f2fs_submit_merged_write(sbi, META);

	return true;

redirty_out:
	folio_redirty_for_writepage(wbc, folio);
	return false;
}

static int f2fs_write_meta_pages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	struct f2fs_lock_context lc;
	long diff, written;

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	/* collect a number of dirty meta pages and write together */
	if (wbc->sync_mode != WB_SYNC_ALL &&
			get_pages(sbi, F2FS_DIRTY_META) <
					nr_pages_to_skip(sbi, META))
		goto skip_write;

	/* if the trylock fails, checkpoint will flush the dirty pages instead */
	if (!f2fs_down_write_trylock_trace(&sbi->cp_global_sem, &lc))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, META);
	diff = nr_pages_to_write(sbi, META, wbc);
	written = f2fs_sync_meta_pages(sbi, wbc->nr_to_write, FS_META_IO);
	f2fs_up_write_trace(&sbi->cp_global_sem, &lc);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META);
	trace_f2fs_writepages(mapping->host, wbc, META);
	return 0;
}

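/*
 * Write back dirty meta folios in tag order. When nr_to_write is bounded,
 * stop at the first discontiguous folio so the resulting bio stays merged.
 */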
long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, long nr_to_write,
				enum iostat_type io_type)
{
	struct address_space *mapping = META_MAPPING(sbi);
	pgoff_t index = 0, prev = ULONG_MAX;
	struct folio_batch fbatch;
	long nwritten = 0;
	int nr_folios;
	struct writeback_control wbc = {};
	struct blk_plug plug;

	folio_batch_init(&fbatch);

	blk_start_plug(&plug);

	while ((nr_folios = filemap_get_folios_tag(mapping, &index,
					(pgoff_t)-1,
					PAGECACHE_TAG_DIRTY, &fbatch))) {
		int i;

		for (i = 0; i < nr_folios; i++) {
			struct folio *folio = fbatch.folios[i];

			if (nr_to_write != LONG_MAX && i != 0 &&
					folio->index != prev +
					folio_nr_pages(fbatch.folios[i-1])) {
				folio_batch_release(&fbatch);
				goto stop;
			}

			folio_lock(folio);

			if (unlikely(!is_meta_folio(folio))) {
continue_unlock:
				folio_unlock(folio);
				continue;
			}
			if (!folio_test_dirty(folio)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			f2fs_folio_wait_writeback(folio, META, true, true);

			if (!folio_clear_dirty_for_io(folio))
				goto continue_unlock;

			if (!__f2fs_write_meta_folio(folio, &wbc,
						io_type)) {
				folio_unlock(folio);
				break;
			}
			nwritten += folio_nr_pages(folio);
			prev = folio->index;
			if (unlikely(nwritten >= nr_to_write))
				break;
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
stop:
	if (nwritten)
		f2fs_submit_merged_write(sbi, META);

	blk_finish_plug(&plug);

	return nwritten;
}

static bool f2fs_dirty_meta_folio(struct address_space *mapping,
		struct folio *folio)
{
	trace_f2fs_set_page_dirty(folio, META);

	if (!folio_test_uptodate(folio))
		folio_mark_uptodate(folio);
	if (filemap_dirty_folio(mapping, folio)) {
		inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_META);
		folio_set_f2fs_reference(folio);
		return true;
	}
	return false;
}

const struct address_space_operations f2fs_meta_aops = {
	.writepages	= f2fs_write_meta_pages,
	.dirty_folio	= f2fs_dirty_meta_folio,
	.invalidate_folio = f2fs_invalidate_folio,
	.release_folio	= f2fs_release_folio,
	.migrate_folio	= filemap_migrate_folio,
};

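/*
 * Insert an ino into the per-type radix tree and list. The entry is
 * preallocated outside ino_lock; the lockless lookup fast path applies
 * only to FLUSH_INO, presumably because the same ino is added repeatedly
 * there and usually already exists.
 */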
static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type)
{
	struct inode_management *im = &sbi->im[type];
	struct ino_entry *e = NULL, *new = NULL;
	int ret;

	if (type == FLUSH_INO) {
		rcu_read_lock();
		e = radix_tree_lookup(&im->ino_root, ino);
		rcu_read_unlock();
	}

retry:
	if (!e)
		new = f2fs_kmem_cache_alloc(ino_entry_slab,
						GFP_NOFS, true, NULL);

	ret = radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
	f2fs_bug_on(sbi, ret);

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	if (!e) {
		if (!new) {
			spin_unlock(&im->ino_lock);
			radix_tree_preload_end();
			goto retry;
		}
		e = new;
		if (unlikely(radix_tree_insert(&im->ino_root, ino, e)))
			f2fs_bug_on(sbi, 1);

		memset(e, 0, sizeof(struct ino_entry));
		e->ino = ino;

		list_add_tail(&e->list, &im->ino_list);
		if (type != ORPHAN_INO)
			im->ino_num++;
	}

	if (type == FLUSH_INO)
		f2fs_set_bit(devidx, (char *)&e->dirty_device);

	spin_unlock(&im->ino_lock);
	radix_tree_preload_end();

	if (new && e != new)
		kmem_cache_free(ino_entry_slab, new);
}

static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	struct inode_management *im = &sbi->im[type];
	struct ino_entry *e;

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	if (e) {
		list_del(&e->list);
		radix_tree_delete(&im->ino_root, ino);
		im->ino_num--;
		spin_unlock(&im->ino_lock);
		kmem_cache_free(ino_entry_slab, e);
		return;
	}
	spin_unlock(&im->ino_lock);
}

void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	/* add new dirty ino entry into list */
	__add_ino_entry(sbi, ino, 0, type);
}

void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	/* remove dirty ino entry from list */
	__remove_ino_entry(sbi, ino, type);
}

/* mode should be APPEND_INO, UPDATE_INO or TRANS_DIR_INO */
bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode)
{
	struct inode_management *im = &sbi->im[mode];
	struct ino_entry *e;

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	spin_unlock(&im->ino_lock);
	return e ? true : false;
}

void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all)
{
	struct ino_entry *e, *tmp;
	int i;

	for (i = all ? ORPHAN_INO : APPEND_INO; i < MAX_INO_ENTRY; i++) {
		struct inode_management *im = &sbi->im[i];

		spin_lock(&im->ino_lock);
		list_for_each_entry_safe(e, tmp, &im->ino_list, list) {
			list_del(&e->list);
			radix_tree_delete(&im->ino_root, e->ino);
			kmem_cache_free(ino_entry_slab, e);
			im->ino_num--;
		}
		spin_unlock(&im->ino_lock);
	}
}

void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type)
{
	__add_ino_entry(sbi, ino, devidx, type);
}

bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type)
{
	struct inode_management *im = &sbi->im[type];
	struct ino_entry *e;
	bool is_dirty = false;

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	if (e && f2fs_test_bit(devidx, (char *)&e->dirty_device))
		is_dirty = true;
	spin_unlock(&im->ino_lock);
	return is_dirty;
}

int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi)
{
	struct inode_management *im = &sbi->im[ORPHAN_INO];
	int err = 0;

	spin_lock(&im->ino_lock);

	if (time_to_inject(sbi, FAULT_ORPHAN)) {
		spin_unlock(&im->ino_lock);
		return -ENOSPC;
	}

	if (unlikely(im->ino_num >= sbi->max_orphans))
		err = -ENOSPC;
	else
		im->ino_num++;
	spin_unlock(&im->ino_lock);

	return err;
}

void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi)
{
	struct inode_management *im = &sbi->im[ORPHAN_INO];

	spin_lock(&im->ino_lock);
	f2fs_bug_on(sbi, im->ino_num == 0);
	im->ino_num--;
	spin_unlock(&im->ino_lock);
}

void f2fs_add_orphan_inode(struct inode *inode)
{
	/* add new orphan ino entry into list */
	__add_ino_entry(F2FS_I_SB(inode), inode->i_ino, 0, ORPHAN_INO);
	f2fs_update_inode_page(inode);
}

void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	/* remove orphan entry from orphan list */
	__remove_ino_entry(sbi, ino, ORPHAN_INO);
}

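/*
 * Drop one orphan inode recorded in the checkpoint: load it, clear its
 * link count, and let iput() truncate and free it. Afterwards the NAT
 * entry must point to NULL_ADDR, or the image is inconsistent.
 */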
static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct node_info ni;
	int err;

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode)) {
		/*
		 * it is a bug if we cannot find the inode for an
		 * orphan entry.
		 */
		f2fs_bug_on(sbi, PTR_ERR(inode) == -ENOENT);
		return PTR_ERR(inode);
	}

	err = f2fs_dquot_initialize(inode);
	if (err) {
		iput(inode);
		goto err_out;
	}

	clear_nlink(inode);

	/* truncate all the data during iput */
	iput(inode);

	err = f2fs_get_node_info(sbi, ino, &ni, false);
	if (err)
		goto err_out;

	/* ENOMEM was fully retried in f2fs_evict_inode. */
	if (ni.blk_addr != NULL_ADDR) {
		err = -EIO;
		goto err_out;
	}
	return 0;

err_out:
	set_sbi_flag(sbi, SBI_NEED_FSCK);
	f2fs_warn(sbi, "%s: orphan failed (ino=%x), run fsck to fix.",
		  __func__, ino);
	return err;
}

int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi)
{
	block_t start_blk, orphan_blocks, i, j;
	int err = 0;

	if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
		return 0;

	if (f2fs_hw_is_readonly(sbi)) {
		f2fs_info(sbi, "write access unavailable, skipping orphan cleanup");
		return 0;
	}

	if (is_sbi_flag_set(sbi, SBI_IS_WRITABLE))
		f2fs_info(sbi, "orphan cleanup on readonly fs");

	start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
	orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi);

	f2fs_ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true);

	for (i = 0; i < orphan_blocks; i++) {
		struct folio *folio;
		struct f2fs_orphan_block *orphan_blk;

		folio = f2fs_get_meta_folio(sbi, start_blk + i);
		if (IS_ERR(folio)) {
			err = PTR_ERR(folio);
			goto out;
		}

		orphan_blk = folio_address(folio);
		for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
			nid_t ino = le32_to_cpu(orphan_blk->ino[j]);

			err = recover_orphan_inode(sbi, ino);
			if (err) {
				f2fs_folio_put(folio, true);
				goto out;
			}
		}
		f2fs_folio_put(folio, true);
	}
	/* clear Orphan Flag */
	clear_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG);
out:
	set_sbi_flag(sbi, SBI_IS_RECOVERED);

	return err;
}

static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
{
	struct list_head *head;
	struct f2fs_orphan_block *orphan_blk = NULL;
	unsigned int nentries = 0;
	unsigned short index = 1;
	unsigned short orphan_blocks;
	struct folio *folio = NULL;
	struct ino_entry *orphan = NULL;
	struct inode_management *im = &sbi->im[ORPHAN_INO];

	orphan_blocks = GET_ORPHAN_BLOCKS(im->ino_num);

	/*
	 * we don't need to take spin_lock(&im->ino_lock) here, since all
	 * the orphan inode operations are covered under f2fs_lock_op().
	 * Besides, holding a spinlock should be avoided across the page
	 * operations below.
	 */
	head = &im->ino_list;

	/* loop for each orphan inode entry and write them in journal block */
	list_for_each_entry(orphan, head, list) {
		if (!folio) {
			folio = f2fs_grab_meta_folio(sbi, start_blk++);
			orphan_blk = folio_address(folio);
			memset(orphan_blk, 0, sizeof(*orphan_blk));
		}

		orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino);

		if (nentries == F2FS_ORPHANS_PER_BLOCK) {
			/*
			 * when an orphan block is full of 1020 entries,
			 * we need to flush the current orphan block and
			 * bring another one into memory
			 */
			orphan_blk->blk_addr = cpu_to_le16(index);
			orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
			orphan_blk->entry_count = cpu_to_le32(nentries);
			folio_mark_dirty(folio);
			f2fs_folio_put(folio, true);
			index++;
			nentries = 0;
			folio = NULL;
		}
	}

	if (folio) {
		orphan_blk->blk_addr = cpu_to_le16(index);
		orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
		orphan_blk->entry_count = cpu_to_le32(nentries);
		folio_mark_dirty(folio);
		f2fs_folio_put(folio, true);
	}
}

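/*
 * Compute the checkpoint block checksum. When checksum_offset is below
 * CP_CHKSUM_OFFSET, the stored checksum splits the block in two, so the
 * bytes after the checksum field are chained into the same crc.
 */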
static __u32 f2fs_checkpoint_chksum(struct f2fs_checkpoint *ckpt)
{
	unsigned int chksum_ofs = le32_to_cpu(ckpt->checksum_offset);
	__u32 chksum;

	chksum = f2fs_crc32(ckpt, chksum_ofs);
	if (chksum_ofs < CP_CHKSUM_OFFSET) {
		chksum_ofs += sizeof(chksum);
		chksum = f2fs_chksum(chksum, (__u8 *)ckpt + chksum_ofs,
				     F2FS_BLKSIZE - chksum_ofs);
	}
	return chksum;
}

static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
		struct f2fs_checkpoint **cp_block, struct folio **cp_folio,
		unsigned long long *version)
{
	size_t crc_offset = 0;
	__u32 crc;

	*cp_folio = f2fs_get_meta_folio(sbi, cp_addr);
	if (IS_ERR(*cp_folio))
		return PTR_ERR(*cp_folio);

	*cp_block = folio_address(*cp_folio);

	crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
	if (crc_offset < CP_MIN_CHKSUM_OFFSET ||
			crc_offset > CP_CHKSUM_OFFSET) {
		f2fs_folio_put(*cp_folio, true);
		f2fs_warn(sbi, "invalid crc_offset: %zu", crc_offset);
		return -EINVAL;
	}

	crc = f2fs_checkpoint_chksum(*cp_block);
	if (crc != cur_cp_crc(*cp_block)) {
		f2fs_folio_put(*cp_folio, true);
		f2fs_warn(sbi, "invalid crc value");
		return -EINVAL;
	}

	*version = cur_cp_version(*cp_block);
	return 0;
}

static struct folio *validate_checkpoint(struct f2fs_sb_info *sbi,
				block_t cp_addr, unsigned long long *version)
{
	struct folio *cp_folio_1 = NULL, *cp_folio_2 = NULL;
	struct f2fs_checkpoint *cp_block = NULL;
	unsigned long long cur_version = 0, pre_version = 0;
	unsigned int cp_blocks;
	int err;

	err = get_checkpoint_version(sbi, cp_addr, &cp_block,
					&cp_folio_1, version);
	if (err)
		return NULL;

	cp_blocks = le32_to_cpu(cp_block->cp_pack_total_block_count);

	if (cp_blocks > BLKS_PER_SEG(sbi) || cp_blocks <= F2FS_CP_PACKS) {
		f2fs_warn(sbi, "invalid cp_pack_total_block_count:%u",
			  le32_to_cpu(cp_block->cp_pack_total_block_count));
		goto invalid_cp;
	}
	pre_version = *version;

	cp_addr += cp_blocks - 1;
	err = get_checkpoint_version(sbi, cp_addr, &cp_block,
					&cp_folio_2, version);
	if (err)
		goto invalid_cp;
	cur_version = *version;

	if (cur_version == pre_version) {
		*version = cur_version;
		f2fs_folio_put(cp_folio_2, true);
		return cp_folio_1;
	}
	f2fs_folio_put(cp_folio_2, true);
invalid_cp:
	f2fs_folio_put(cp_folio_1, true);
	return NULL;
}

int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *cp_block;
	struct f2fs_super_block *fsb = sbi->raw_super;
	struct folio *cp1, *cp2, *cur_folio;
	unsigned long blk_size = sbi->blocksize;
	unsigned long long cp1_version = 0, cp2_version = 0;
	unsigned long long cp_start_blk_no;
	unsigned int cp_blks = 1 + __cp_payload(sbi);
	block_t cp_blk_no;
	int i;
	int err;

	sbi->ckpt = f2fs_kvzalloc(sbi, array_size(blk_size, cp_blks),
				  GFP_KERNEL);
	if (!sbi->ckpt)
		return -ENOMEM;
	/*
	 * Finding the valid cp block involves reading both
	 * sets (cp pack 1 and cp pack 2)
	 */
	cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr);
	cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);

	/* The second checkpoint pack should start at the next segment */
	cp_start_blk_no += ((unsigned long long)1) <<
				le32_to_cpu(fsb->log_blocks_per_seg);
	cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);

	if (cp1 && cp2) {
		if (ver_after(cp2_version, cp1_version))
			cur_folio = cp2;
		else
			cur_folio = cp1;
	} else if (cp1) {
		cur_folio = cp1;
	} else if (cp2) {
		cur_folio = cp2;
	} else {
		err = -EFSCORRUPTED;
		goto fail_no_cp;
	}

	cp_block = folio_address(cur_folio);
	memcpy(sbi->ckpt, cp_block, blk_size);

	if (cur_folio == cp1)
		sbi->cur_cp_pack = 1;
	else
		sbi->cur_cp_pack = 2;

	/* Sanity checking of checkpoint */
	if (f2fs_sanity_check_ckpt(sbi)) {
		err = -EFSCORRUPTED;
		goto free_fail_no_cp;
	}

	if (cp_blks <= 1)
		goto done;

	cp_blk_no = le32_to_cpu(fsb->cp_blkaddr);
	if (cur_folio == cp2)
		cp_blk_no += BIT(le32_to_cpu(fsb->log_blocks_per_seg));

	for (i = 1; i < cp_blks; i++) {
		void *sit_bitmap_ptr;
		unsigned char *ckpt = (unsigned char *)sbi->ckpt;

		cur_folio = f2fs_get_meta_folio(sbi, cp_blk_no + i);
		if (IS_ERR(cur_folio)) {
			err = PTR_ERR(cur_folio);
			goto free_fail_no_cp;
		}
		sit_bitmap_ptr = folio_address(cur_folio);
		memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size);
		f2fs_folio_put(cur_folio, true);
	}
done:
	f2fs_folio_put(cp1, true);
	f2fs_folio_put(cp2, true);
	return 0;

free_fail_no_cp:
	f2fs_folio_put(cp1, true);
	f2fs_folio_put(cp2, true);
fail_no_cp:
	kvfree(sbi->ckpt);
	return err;
}

static void __add_dirty_inode(struct inode *inode, enum inode_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;

	if (is_inode_flag_set(inode, flag))
		return;

	set_inode_flag(inode, flag);
	list_add_tail(&F2FS_I(inode)->dirty_list, &sbi->inode_list[type]);
	stat_inc_dirty_inode(sbi, type);
}

static void __remove_dirty_inode(struct inode *inode, enum inode_type type)
{
	int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;

	if (get_dirty_pages(inode) || !is_inode_flag_set(inode, flag))
		return;

	list_del_init(&F2FS_I(inode)->dirty_list);
	clear_inode_flag(inode, flag);
	stat_dec_dirty_inode(F2FS_I_SB(inode), type);
}

void f2fs_update_dirty_folio(struct inode *inode, struct folio *folio)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;

	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
			!S_ISLNK(inode->i_mode))
		return;

	spin_lock(&sbi->inode_lock[type]);
	if (type != FILE_INODE || test_opt(sbi, DATA_FLUSH))
		__add_dirty_inode(inode, type);
	inode_inc_dirty_pages(inode);
	spin_unlock(&sbi->inode_lock[type]);

	folio_set_f2fs_reference(folio);
}

void f2fs_remove_dirty_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;

	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
			!S_ISLNK(inode->i_mode))
		return;

	if (type == FILE_INODE && !test_opt(sbi, DATA_FLUSH))
		return;

	spin_lock(&sbi->inode_lock[type]);
	__remove_dirty_inode(inode, type);
	spin_unlock(&sbi->inode_lock[type]);
}

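/*
 * Walk the dirty inode list for @type and write each inode's data pages,
 * one inode at a time, until the list drains or a checkpoint error stops
 * the loop. The list head is resampled on every pass under inode_lock.
 */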
int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type,
						bool from_cp)
{
	struct list_head *head;
	struct inode *inode;
	struct f2fs_inode_info *fi;
	bool is_dir = (type == DIR_INODE);
	unsigned long ino = 0;

	trace_f2fs_sync_dirty_inodes_enter(sbi->sb, is_dir,
				get_pages(sbi, is_dir ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
retry:
	if (unlikely(f2fs_cp_error(sbi))) {
		trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir,
				get_pages(sbi, is_dir ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
		return -EIO;
	}

	spin_lock(&sbi->inode_lock[type]);

	head = &sbi->inode_list[type];
	if (list_empty(head)) {
		spin_unlock(&sbi->inode_lock[type]);
		trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir,
				get_pages(sbi, is_dir ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
		return 0;
	}
	fi = list_first_entry(head, struct f2fs_inode_info, dirty_list);
	inode = igrab(&fi->vfs_inode);
	spin_unlock(&sbi->inode_lock[type]);
	if (inode) {
		unsigned long cur_ino = inode->i_ino;

		if (from_cp)
			F2FS_I(inode)->cp_task = current;
		F2FS_I(inode)->wb_task = current;

		filemap_fdatawrite(inode->i_mapping);

		F2FS_I(inode)->wb_task = NULL;
		if (from_cp)
			F2FS_I(inode)->cp_task = NULL;

		iput(inode);
		/* We need to give the CPU to other writers. */
		if (ino == cur_ino)
			cond_resched();
		else
			ino = cur_ino;
	} else {
		/*
		 * We should submit the bio, since several dentry pages
		 * under writeback may exist in the freeing inode.
		 */
		f2fs_submit_merged_write(sbi, DATA);
		cond_resched();
	}
	goto retry;
}

static int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi)
{
	struct list_head *head = &sbi->inode_list[DIRTY_META];
	struct inode *inode;
	struct f2fs_inode_info *fi;
	s64 total = get_pages(sbi, F2FS_DIRTY_IMETA);

	while (total--) {
		if (unlikely(f2fs_cp_error(sbi)))
			return -EIO;

		spin_lock(&sbi->inode_lock[DIRTY_META]);
		if (list_empty(head)) {
			spin_unlock(&sbi->inode_lock[DIRTY_META]);
			return 0;
		}
		fi = list_first_entry(head, struct f2fs_inode_info,
							gdirty_list);
		inode = igrab(&fi->vfs_inode);
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		if (inode) {
			sync_inode_metadata(inode, 0);

			/* it's on eviction */
			if (is_inode_flag_set(inode, FI_DIRTY_INODE))
				f2fs_update_inode_page(inode);
			iput(inode);
		}
	}
	return 0;
}

static void __prepare_cp_block(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	nid_t last_nid = nm_i->next_scan_nid;

	next_free_nid(sbi, &last_nid);
	ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
	ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
	ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
	ckpt->next_free_nid = cpu_to_le32(last_nid);

	/* update user_block_counts */
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	percpu_counter_set(&sbi->alloc_valid_block_count, 0);
	percpu_counter_set(&sbi->rf_node_block_count, 0);
}

static bool __need_flush_quota(struct f2fs_sb_info *sbi)
{
	bool ret = false;

	if (!is_journalled_quota(sbi))
		return false;

	if (!f2fs_down_write_trylock(&sbi->quota_sem))
		return true;
	if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) {
		ret = false;
	} else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR)) {
		ret = false;
	} else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_FLUSH)) {
		clear_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
		ret = true;
	} else if (get_pages(sbi, F2FS_DIRTY_QDATA)) {
		ret = true;
	}
	f2fs_up_write(&sbi->quota_sem);
	return ret;
}

/*
 * Freeze all the FS-operations for checkpoint.
 */
static int block_operations(struct f2fs_sb_info *sbi)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
	};
	int err = 0, cnt = 0;

	/*
	 * Let's flush inline_data in dirty node pages.
	 */
	f2fs_flush_inline_data(sbi);

retry_flush_quotas:
	f2fs_lock_all(sbi);
	if (__need_flush_quota(sbi)) {
		bool need_lock = sbi->umount_lock_holder != current;

		if (++cnt > DEFAULT_RETRY_QUOTA_FLUSH_COUNT) {
			set_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH);
			set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
			goto retry_flush_dents;
		}
		f2fs_unlock_all(sbi);

		/* don't grab s_umount lock during mount/umount/remount/freeze/quotactl */
		if (!need_lock) {
			f2fs_do_quota_sync(sbi->sb, -1);
		} else if (down_read_trylock(&sbi->sb->s_umount)) {
			f2fs_do_quota_sync(sbi->sb, -1);
			up_read(&sbi->sb->s_umount);
		}
		cond_resched();
		goto retry_flush_quotas;
	}

retry_flush_dents:
	/* write all the dirty dentry pages */
	if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
		f2fs_unlock_all(sbi);
		err = f2fs_sync_dirty_inodes(sbi, DIR_INODE, true);
		if (err)
			return err;
		cond_resched();
		goto retry_flush_quotas;
	}

	/*
	 * POR: we should ensure that there are no dirty node pages
	 * until the nat/sit flush finishes, since inode->i_blocks can
	 * still be updated.
	 */
	f2fs_down_write(&sbi->node_change);

	if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
		f2fs_up_write(&sbi->node_change);
		f2fs_unlock_all(sbi);
		err = f2fs_sync_inode_meta(sbi);
		if (err)
			return err;
		cond_resched();
		goto retry_flush_quotas;
	}

retry_flush_nodes:
	f2fs_down_write(&sbi->node_write);

	if (get_pages(sbi, F2FS_DIRTY_NODES)) {
		f2fs_up_write(&sbi->node_write);
		atomic_inc(&sbi->wb_sync_req[NODE]);
		err = f2fs_sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO);
		atomic_dec(&sbi->wb_sync_req[NODE]);
		if (err) {
			f2fs_up_write(&sbi->node_change);
			f2fs_unlock_all(sbi);
			return err;
		}
		cond_resched();
		goto retry_flush_nodes;
	}

	/*
	 * sbi->node_change is used only for AIO write_begin path which produces
	 * dirty node blocks and some checkpoint values by block allocation.
	 */
	__prepare_cp_block(sbi);
	f2fs_up_write(&sbi->node_change);
	return err;
}

static void unblock_operations(struct f2fs_sb_info *sbi)
{
	f2fs_up_write(&sbi->node_write);
	f2fs_unlock_all(sbi);
}

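/*
 * Wait (with writeback kicks) until the given page counter drains: dirty
 * meta is pushed via f2fs_sync_meta_pages() and checkpoint data writeback
 * is flushed, then we sleep on cp_wait until woken or the timeout fires.
 */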
void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type)
{
	DEFINE_WAIT(wait);

	for (;;) {
		if (!get_pages(sbi, type))
			break;

		if (unlikely(f2fs_cp_error(sbi) &&
			!is_sbi_flag_set(sbi, SBI_IS_CLOSE)))
			break;

		if (type == F2FS_DIRTY_META)
			f2fs_sync_meta_pages(sbi, LONG_MAX, FS_CP_META_IO);
		else if (type == F2FS_WB_CP_DATA)
			f2fs_submit_merged_write(sbi, DATA);

		prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);
		io_schedule_timeout(DEFAULT_SCHEDULE_TIMEOUT);
	}
	finish_wait(&sbi->cp_wait, &wait);
}

static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned long flags;

	spin_lock_irqsave(&sbi->cp_lock, flags);

	if ((cpc->reason & CP_UMOUNT) &&
			le32_to_cpu(ckpt->cp_pack_total_block_count) >
			sbi->blocks_per_seg - NM_I(sbi)->nat_bits_blocks)
		disable_nat_bits(sbi, false);

	if (cpc->reason & CP_TRIMMED)
		__set_ckpt_flags(ckpt, CP_TRIMMED_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_TRIMMED_FLAG);

	if (cpc->reason & CP_UMOUNT)
		__set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);

	if (cpc->reason & CP_FASTBOOT)
		__set_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);

	if (orphan_num)
		__set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
		__set_ckpt_flags(ckpt, CP_FSCK_FLAG);

	if (is_sbi_flag_set(sbi, SBI_IS_RESIZEFS))
		__set_ckpt_flags(ckpt, CP_RESIZEFS_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_RESIZEFS_FLAG);

	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED))
		__set_ckpt_flags(ckpt, CP_DISABLED_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_DISABLED_FLAG);

	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK))
		__set_ckpt_flags(ckpt, CP_DISABLED_QUICK_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_DISABLED_QUICK_FLAG);

	if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH))
		__set_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);

	if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR))
		__set_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);

	/* set this flag to activate crc|cp_ver for recovery */
	__set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG);
	__clear_ckpt_flags(ckpt, CP_NOCRC_RECOVERY_FLAG);

	spin_unlock_irqrestore(&sbi->cp_lock, flags);
}

static void commit_checkpoint(struct f2fs_sb_info *sbi,
	void *src, block_t blk_addr)
{
	struct writeback_control wbc = {};

	/*
	 * Calling filemap_get_folios_tag() and locking the folio again
	 * would take extra time, so f2fs_update_meta_pages() and
	 * f2fs_sync_meta_pages() are combined in this function.
	 */
	struct folio *folio = f2fs_grab_meta_folio(sbi, blk_addr);

	memcpy(folio_address(folio), src, PAGE_SIZE);

	folio_mark_dirty(folio);
	if (unlikely(!folio_clear_dirty_for_io(folio)))
		f2fs_bug_on(sbi, 1);

	/* writeout cp pack 2 page */
	if (unlikely(!__f2fs_write_meta_folio(folio, &wbc, FS_CP_META_IO))) {
		if (f2fs_cp_error(sbi)) {
			f2fs_folio_put(folio, true);
			return;
		}
		f2fs_bug_on(sbi, true);
	}

	f2fs_folio_put(folio, false);

	/* submit checkpoint (with barrier if NOBARRIER is not set) */
	f2fs_submit_merged_write(sbi, META_FLUSH);
}

static inline u64 get_sectors_written(struct block_device *bdev)
{
	return (u64)part_stat_read(bdev, sectors[STAT_WRITE]);
}

u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi)
{
	if (f2fs_is_multi_device(sbi)) {
		u64 sectors = 0;
		int i;

		for (i = 0; i < sbi->s_ndevs; i++)
			sectors += get_sectors_written(FDEV(i).bdev);

		return sectors;
	}

	return get_sectors_written(sbi->sb->s_bdev);
}

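/*
 * Per-phase checkpoint latency bookkeeping: stat_cp_time() timestamps a
 * phase, and check_cp_time() keeps the slowest checkpoint seen so far and
 * logs a per-step breakdown once it crosses CP_LONG_LATENCY_THRESHOLD.
 */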
static inline void stat_cp_time(struct cp_control *cpc, enum cp_time type)
{
	cpc->stats.times[type] = ktime_get();
}

static inline void check_cp_time(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	unsigned long long sb_diff, cur_diff;
	enum cp_time ct;

	sb_diff = (u64)ktime_ms_delta(sbi->cp_stats.times[CP_TIME_END],
				sbi->cp_stats.times[CP_TIME_START]);
	cur_diff = (u64)ktime_ms_delta(cpc->stats.times[CP_TIME_END],
				cpc->stats.times[CP_TIME_START]);

	if (cur_diff > sb_diff) {
		sbi->cp_stats = cpc->stats;
		if (cur_diff < CP_LONG_LATENCY_THRESHOLD)
			return;

		f2fs_warn(sbi, "checkpoint was blocked for %llu ms", cur_diff);
		for (ct = CP_TIME_START; ct < CP_TIME_MAX - 1; ct++)
			f2fs_warn(sbi, "Step#%d: %llu ms", ct,
				(u64)ktime_ms_delta(cpc->stats.times[ct + 1],
						cpc->stats.times[ct]));
	}
}

static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num, flags;
	block_t start_blk;
	unsigned int data_sum_blocks, orphan_blocks;
	__u32 crc32 = 0;
	int i;
	int cp_payload_blks = __cp_payload(sbi);
	struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	u64 kbytes_written;
	int err;

	/* Flush all the NAT/SIT pages */
	f2fs_sync_meta_pages(sbi, LONG_MAX, FS_CP_META_IO);

	stat_cp_time(cpc, CP_TIME_SYNC_META);

	/* start to update checkpoint, cp ver is already updated previously */
	ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi, true));
	ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i + CURSEG_HOT_NODE);

		ckpt->cur_node_segno[i] = cpu_to_le32(curseg->segno);
		ckpt->cur_node_blkoff[i] = cpu_to_le16(curseg->next_blkoff);
		ckpt->alloc_type[i + CURSEG_HOT_NODE] = curseg->alloc_type;
	}
	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i + CURSEG_HOT_DATA);

		ckpt->cur_data_segno[i] = cpu_to_le32(curseg->segno);
		ckpt->cur_data_blkoff[i] = cpu_to_le16(curseg->next_blkoff);
		ckpt->alloc_type[i + CURSEG_HOT_DATA] = curseg->alloc_type;
	}

	/* 2 cp + n data seg summary + orphan inode blocks */
	data_sum_blocks = f2fs_npages_for_summary_flush(sbi, false);
	spin_lock_irqsave(&sbi->cp_lock, flags);
	if (data_sum_blocks < NR_CURSEG_DATA_TYPE)
		__set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
	spin_unlock_irqrestore(&sbi->cp_lock, flags);

	orphan_blocks = GET_ORPHAN_BLOCKS(orphan_num);
	ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks +
			orphan_blocks);

	if (__remain_node_summaries(cpc->reason))
		ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
				cp_payload_blks + data_sum_blocks +
				orphan_blocks + NR_CURSEG_NODE_TYPE);
	else
		ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
				cp_payload_blks + data_sum_blocks +
				orphan_blocks);

	/* update ckpt flag for checkpoint */
	update_ckpt_flags(sbi, cpc);

	/* update SIT/NAT bitmap */
	get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
	get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));

	crc32 = f2fs_checkpoint_chksum(ckpt);
	*((__le32 *)((unsigned char *)ckpt +
				le32_to_cpu(ckpt->checksum_offset)))
				= cpu_to_le32(crc32);

	start_blk = __start_cp_next_addr(sbi);

	/* write nat bits */
	if (enabled_nat_bits(sbi, cpc)) {
		__u64 cp_ver = cur_cp_version(ckpt);
		block_t blk;

		cp_ver |= ((__u64)crc32 << 32);
		*(__le64 *)nm_i->nat_bits = cpu_to_le64(cp_ver);

		blk = start_blk + BLKS_PER_SEG(sbi) - nm_i->nat_bits_blocks;
		for (i = 0; i < nm_i->nat_bits_blocks; i++)
			f2fs_update_meta_page(sbi, nm_i->nat_bits +
					F2FS_BLK_TO_BYTES(i), blk + i);
	}

	/* write out checkpoint buffer at block 0 */
	f2fs_update_meta_page(sbi, ckpt, start_blk++);

	for (i = 1; i < 1 + cp_payload_blks; i++)
		f2fs_update_meta_page(sbi, (char *)ckpt + i * F2FS_BLKSIZE,
							start_blk++);

	if (orphan_num) {
		write_orphan_inodes(sbi, start_blk);
		start_blk += orphan_blocks;
	}

	f2fs_write_data_summaries(sbi, start_blk);
	start_blk += data_sum_blocks;

	/* Record write statistics in the hot node summary */
	kbytes_written = sbi->kbytes_written;
	kbytes_written += (f2fs_get_sectors_written(sbi) -
				sbi->sectors_written_start) >> 1;
	seg_i->journal->info.kbytes_written = cpu_to_le64(kbytes_written);

	if (__remain_node_summaries(cpc->reason)) {
		f2fs_write_node_summaries(sbi, start_blk);
		start_blk += NR_CURSEG_NODE_TYPE;
	}

	/* Here, one bio holds the whole CP pack except the cp pack 2 page */
	f2fs_sync_meta_pages(sbi, LONG_MAX, FS_CP_META_IO);
	stat_cp_time(cpc, CP_TIME_SYNC_CP_META);

	/* Wait for all dirty meta pages to be submitted for IO */
	f2fs_wait_on_all_pages(sbi, F2FS_DIRTY_META);
	stat_cp_time(cpc, CP_TIME_WAIT_DIRTY_META);

	/* wait for previously submitted meta pages' writeback */
	f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
	stat_cp_time(cpc, CP_TIME_WAIT_CP_DATA);

	/* flush all device cache */
	err = f2fs_flush_device_cache(sbi);
	if (err)
		return err;
	stat_cp_time(cpc, CP_TIME_FLUSH_DEVICE);

	/* flush the checkpoint cp pack 2 page with a barrier, if possible */
	commit_checkpoint(sbi, ckpt, start_blk);
	f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
	stat_cp_time(cpc, CP_TIME_WAIT_LAST_CP);

	/*
	 * invalidate the intermediate page cache borrowed from the meta
	 * inode, which is used for migration of encrypted, verity or
	 * compressed inodes' blocks.
	 */
	if (f2fs_sb_has_encrypt(sbi) || f2fs_sb_has_verity(sbi) ||
		f2fs_sb_has_compression(sbi))
		f2fs_bug_on(sbi,
			invalidate_inode_pages2_range(META_MAPPING(sbi),
				MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1));

	f2fs_release_ino_entry(sbi, false);

	f2fs_reset_fsync_node_info(sbi);

	clear_sbi_flag(sbi, SBI_IS_DIRTY);
	clear_sbi_flag(sbi, SBI_NEED_CP);
	clear_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH);

	spin_lock(&sbi->stat_lock);
	sbi->unusable_block_count = 0;
	spin_unlock(&sbi->stat_lock);

	__set_cp_next_pack(sbi);

	/*
	 * redirty superblock if metadata like node page or inode cache is
	 * updated during writing checkpoint.
	 */
	if (get_pages(sbi, F2FS_DIRTY_NODES) ||
			get_pages(sbi, F2FS_DIRTY_IMETA))
		set_sbi_flag(sbi, SBI_IS_DIRTY);

	f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_DENTS));

	return unlikely(f2fs_cp_error(sbi)) ? -EIO : 0;
}

int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_lock_context lc;
	unsigned long long ckpt_ver;
	int err = 0;

	stat_cp_time(cpc, CP_TIME_START);

	if (f2fs_readonly(sbi->sb) || f2fs_hw_is_readonly(sbi))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (cpc->reason != CP_PAUSE)
			return 0;
		f2fs_warn(sbi, "Start checkpoint disabled!");
	}
	if (cpc->reason != CP_RESIZE)
		f2fs_down_write_trace(&sbi->cp_global_sem, &lc);

	stat_cp_time(cpc, CP_TIME_LOCK);

	if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
		((cpc->reason & CP_FASTBOOT) || (cpc->reason & CP_SYNC) ||
		((cpc->reason & CP_DISCARD) && !sbi->discard_blks)))
		goto out;
	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto out;
	}

	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, CP_PHASE_START_BLOCK_OPS);

	err = block_operations(sbi);
	if (err)
		goto out;

	stat_cp_time(cpc, CP_TIME_OP_LOCK);

	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, CP_PHASE_FINISH_BLOCK_OPS);

	f2fs_flush_merged_writes(sbi);

	/* this is the case of multiple fstrims without any changes */
	if (cpc->reason & CP_DISCARD) {
		if (!f2fs_exist_trim_candidates(sbi, cpc)) {
			unblock_operations(sbi);
			goto out;
		}

		if (NM_I(sbi)->nat_cnt[DIRTY_NAT] == 0 &&
				SIT_I(sbi)->dirty_sentries == 0 &&
				prefree_segments(sbi) == 0) {
			f2fs_flush_sit_entries(sbi, cpc);
			f2fs_clear_prefree_segments(sbi, cpc);
			unblock_operations(sbi);
			goto out;
		}
	}
	stat_cp_time(cpc, CP_TIME_MERGE_WRITE);

	/*
	 * update checkpoint pack index
	 * Increase the version number so that
	 * SIT entries and seg summaries are written at the correct place
	 */
	ckpt_ver = cur_cp_version(ckpt);
	ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver);

	/* write cached NAT/SIT entries to NAT/SIT area */
	err = f2fs_flush_nat_entries(sbi, cpc);
	if (err) {
		f2fs_err(sbi, "f2fs_flush_nat_entries failed err:%d, stop checkpoint", err);
		f2fs_bug_on(sbi, !f2fs_cp_error(sbi));
		goto stop;
	}
	stat_cp_time(cpc, CP_TIME_FLUSH_NAT);

	f2fs_flush_sit_entries(sbi, cpc);

	stat_cp_time(cpc, CP_TIME_FLUSH_SIT);

	/* save inmem log status */
	f2fs_save_inmem_curseg(sbi);

	err = do_checkpoint(sbi, cpc);
	if (err) {
		f2fs_err(sbi, "do_checkpoint failed err:%d, stop checkpoint", err);
		f2fs_bug_on(sbi, !f2fs_cp_error(sbi));
		f2fs_release_discard_addrs(sbi);
	} else {
		f2fs_clear_prefree_segments(sbi, cpc);
	}

	f2fs_restore_inmem_curseg(sbi);
	f2fs_reinit_atgc_curseg(sbi);
	stat_inc_cp_count(sbi);
stop:
	unblock_operations(sbi);
	stat_cp_time(cpc, CP_TIME_END);
	check_cp_time(sbi, cpc);

	if (cpc->reason & CP_RECOVERY)
		f2fs_notice(sbi, "checkpoint: version = %llx", ckpt_ver);

	/* update CP_TIME to trigger checkpoint periodically */
	f2fs_update_time(sbi, CP_TIME);
	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, CP_PHASE_FINISH_CHECKPOINT);
out:
	if (cpc->reason != CP_RESIZE)
		f2fs_up_write_trace(&sbi->cp_global_sem, &lc);
	return err;
}

void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < MAX_INO_ENTRY; i++) {
		struct inode_management *im = &sbi->im[i];

		INIT_RADIX_TREE(&im->ino_root, GFP_ATOMIC);
		spin_lock_init(&im->ino_lock);
		INIT_LIST_HEAD(&im->ino_list);
		im->ino_num = 0;
	}

	sbi->max_orphans = (BLKS_PER_SEG(sbi) - F2FS_CP_PACKS -
			NR_CURSEG_PERSIST_TYPE - __cp_payload(sbi)) *
				F2FS_ORPHANS_PER_BLOCK;
}

int __init f2fs_create_checkpoint_caches(void)
{
	ino_entry_slab = f2fs_kmem_cache_create("f2fs_ino_entry",
					sizeof(struct ino_entry));
	if (!ino_entry_slab)
		return -ENOMEM;
	f2fs_inode_entry_slab = f2fs_kmem_cache_create("f2fs_inode_entry",
					sizeof(struct inode_entry));
	if (!f2fs_inode_entry_slab) {
		kmem_cache_destroy(ino_entry_slab);
		return -ENOMEM;
	}
	return 0;
}

void f2fs_destroy_checkpoint_caches(void)
{
	kmem_cache_destroy(ino_entry_slab);
	kmem_cache_destroy(f2fs_inode_entry_slab);
}

static int __write_checkpoint_sync(struct f2fs_sb_info *sbi)
{
	struct cp_control cpc = { .reason = CP_SYNC, };
	struct f2fs_lock_context lc;
	int err;

	f2fs_down_write_trace(&sbi->gc_lock, &lc);
	err = f2fs_write_checkpoint(sbi, &cpc);
	f2fs_up_write_trace(&sbi->gc_lock, &lc);

	return err;
}

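/*
 * Drain the merged-checkpoint issue list: run a single checkpoint on
 * behalf of all queued requests, then complete every waiter with the
 * same result and update the average/peak service-time stats.
 */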
static void __checkpoint_and_complete_reqs(struct f2fs_sb_info *sbi)
{
	struct ckpt_req_control *cprc = &sbi->cprc_info;
	struct ckpt_req *req, *next;
	struct llist_node *dispatch_list;
	u64 sum_diff = 0, diff, count = 0;
	int ret;

	dispatch_list = llist_del_all(&cprc->issue_list);
	if (!dispatch_list)
		return;
	dispatch_list = llist_reverse_order(dispatch_list);

	ret = __write_checkpoint_sync(sbi);
	atomic_inc(&cprc->issued_ckpt);

	llist_for_each_entry_safe(req, next, dispatch_list, llnode) {
		diff = (u64)ktime_ms_delta(ktime_get(), req->queue_time);
		req->ret = ret;
		req->delta_time = diff;
		complete(&req->wait);

		sum_diff += diff;
		count++;
	}
	atomic_sub(count, &cprc->queued_ckpt);
	atomic_add(count, &cprc->total_ckpt);

	spin_lock(&cprc->stat_lock);
	cprc->cur_time = (unsigned int)div64_u64(sum_diff, count);
	if (cprc->peak_time < cprc->cur_time)
		cprc->peak_time = cprc->cur_time;
	spin_unlock(&cprc->stat_lock);
}

static int issue_checkpoint_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct ckpt_req_control *cprc = &sbi->cprc_info;
	wait_queue_head_t *q = &cprc->ckpt_wait_queue;
repeat:
	if (kthread_should_stop())
		return 0;

	if (!llist_empty(&cprc->issue_list))
		__checkpoint_and_complete_reqs(sbi);

	wait_event_interruptible(*q,
		kthread_should_stop() || !llist_empty(&cprc->issue_list));
	goto repeat;
}

static void flush_remained_ckpt_reqs(struct f2fs_sb_info *sbi,
		struct ckpt_req *wait_req)
{
	struct ckpt_req_control *cprc = &sbi->cprc_info;

	if (!llist_empty(&cprc->issue_list)) {
		__checkpoint_and_complete_reqs(sbi);
	} else {
		/* already dispatched by issue_checkpoint_thread */
		if (wait_req)
			wait_for_completion(&wait_req->wait);
	}
}

static void init_ckpt_req(struct ckpt_req *req)
{
	memset(req, 0, sizeof(struct ckpt_req));

	init_completion(&req->wait);
	req->queue_time = ktime_get();
}

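/*
 * Entry point for merged checkpoints: when merging does not apply (option
 * off, non-CP_SYNC reason, or the umount path), run the checkpoint
 * synchronously; otherwise queue a request for the f2fs_issue_ckpt thread
 * and wait for its completion.
 */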
int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi)
{
	struct ckpt_req_control *cprc = &sbi->cprc_info;
	struct ckpt_req req;
	struct cp_control cpc;

	cpc.reason = __get_cp_reason(sbi);
	if (!test_opt(sbi, MERGE_CHECKPOINT) || cpc.reason != CP_SYNC ||
			sbi->umount_lock_holder == current) {
		struct f2fs_lock_context lc;
		int ret;

		f2fs_down_write_trace(&sbi->gc_lock, &lc);
		ret = f2fs_write_checkpoint(sbi, &cpc);
		f2fs_up_write_trace(&sbi->gc_lock, &lc);

		return ret;
	}

	if (!cprc->f2fs_issue_ckpt)
		return __write_checkpoint_sync(sbi);

	init_ckpt_req(&req);

	llist_add(&req.llnode, &cprc->issue_list);
	atomic_inc(&cprc->queued_ckpt);

	/*
	 * update issue_list before we wake up issue_checkpoint thread,
	 * this smp_mb() pairs with another barrier in ___wait_event(),
	 * see more details in comments of waitqueue_active().
	 */
	smp_mb();

	if (waitqueue_active(&cprc->ckpt_wait_queue))
		wake_up(&cprc->ckpt_wait_queue);

	if (cprc->f2fs_issue_ckpt)
		wait_for_completion(&req.wait);
	else
		flush_remained_ckpt_reqs(sbi, &req);

	if (unlikely(req.delta_time >= CP_LONG_LATENCY_THRESHOLD)) {
		f2fs_warn_ratelimited(sbi,
			"blocked on checkpoint for %u ms", cprc->peak_time);
		dump_stack();
	}

	return req.ret;
}

int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct ckpt_req_control *cprc = &sbi->cprc_info;

	if (cprc->f2fs_issue_ckpt)
		return 0;

	cprc->f2fs_issue_ckpt = kthread_run(issue_checkpoint_thread, sbi,
			"f2fs_ckpt-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(cprc->f2fs_issue_ckpt)) {
		int err = PTR_ERR(cprc->f2fs_issue_ckpt);

		cprc->f2fs_issue_ckpt = NULL;
		return err;
	}

	set_task_ioprio(cprc->f2fs_issue_ckpt, cprc->ckpt_thread_ioprio);
	set_user_nice(cprc->f2fs_issue_ckpt,
			PRIO_TO_NICE(sbi->critical_task_priority));

	return 0;
}

void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi)
{
	struct ckpt_req_control *cprc = &sbi->cprc_info;
	struct task_struct *ckpt_task;

	if (!cprc->f2fs_issue_ckpt)
		return;

	ckpt_task = cprc->f2fs_issue_ckpt;
	cprc->f2fs_issue_ckpt = NULL;
	kthread_stop(ckpt_task);

	f2fs_flush_ckpt_thread(sbi);
}

void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi)
{
	struct ckpt_req_control *cprc = &sbi->cprc_info;

	flush_remained_ckpt_reqs(sbi, NULL);

	/* Let's wait for the previously dispatched checkpoints. */
	while (atomic_read(&cprc->queued_ckpt))
		io_schedule_timeout(DEFAULT_SCHEDULE_TIMEOUT);
}

void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi)
{
	struct ckpt_req_control *cprc = &sbi->cprc_info;

	atomic_set(&cprc->issued_ckpt, 0);
	atomic_set(&cprc->total_ckpt, 0);
	atomic_set(&cprc->queued_ckpt, 0);
	cprc->ckpt_thread_ioprio = DEFAULT_CHECKPOINT_IOPRIO;
	init_waitqueue_head(&cprc->ckpt_wait_queue);
	init_llist_head(&cprc->issue_list);
	spin_lock_init(&cprc->stat_lock);
}