Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

quota: Fix race of dquot_scan_active() with quota deactivation

dquot_scan_active() can race with quota deactivation in
quota_release_workfn() like:

CPU0 (quota_release_workfn)             CPU1 (dquot_scan_active)
==============================          ==============================
spin_lock(&dq_list_lock);
list_replace_init(
    &releasing_dquots, &rls_head);
/* dquot X on rls_head,
   dq_count == 0,
   DQ_ACTIVE_B still set */
spin_unlock(&dq_list_lock);
synchronize_srcu(&dquot_srcu);
                                        spin_lock(&dq_list_lock);
                                        list_for_each_entry(dquot,
                                            &inuse_list, dq_inuse) {
                                          /* finds dquot X */
                                          dquot_active(X) -> true
                                          atomic_inc(&X->dq_count);
                                        }
                                        spin_unlock(&dq_list_lock);
spin_lock(&dq_list_lock);
dquot = list_first_entry(&rls_head);
WARN_ON_ONCE(atomic_read(&dquot->dq_count));

The problem is not only a cosmetic one as under memory pressure the
caller of dquot_scan_active() can end up working on freed dquot.

Fix the problem by making sure the dquot is removed from releasing list
when we acquire a reference to it.

Fixes: 869b6ea1609f ("quota: Fix slow quotaoff")
Reported-by: Sam Sun <samsun1006219@gmail.com>
Link: https://lore.kernel.org/all/CAEkJfYPTt3uP1vAYnQ5V2ZWn5O9PLhhGi5HbOcAzyP9vbXyjeg@mail.gmail.com
Signed-off-by: Jan Kara <jack@suse.cz>

Jan Kara e93ab401 08841b06

+31 -16
+30 -8
fs/quota/dquot.c
@@ -363,6 +363,31 @@
 	return test_bit(DQ_ACTIVE_B, &dquot->dq_flags);
 }
 
+static struct dquot *__dqgrab(struct dquot *dquot)
+{
+	lockdep_assert_held(&dq_list_lock);
+	if (!atomic_read(&dquot->dq_count))
+		remove_free_dquot(dquot);
+	atomic_inc(&dquot->dq_count);
+	return dquot;
+}
+
+/*
+ * Get reference to dquot when we got pointer to it by some other means. The
+ * dquot has to be active and the caller has to make sure it cannot get
+ * deactivated under our hands.
+ */
+struct dquot *dqgrab(struct dquot *dquot)
+{
+	spin_lock(&dq_list_lock);
+	WARN_ON_ONCE(!dquot_active(dquot));
+	dquot = __dqgrab(dquot);
+	spin_unlock(&dq_list_lock);
+
+	return dquot;
+}
+EXPORT_SYMBOL_GPL(dqgrab);
+
 static inline int dquot_dirty(struct dquot *dquot)
 {
 	return test_bit(DQ_MOD_B, &dquot->dq_flags);
@@ -666,15 +641,14 @@
 			continue;
 		if (dquot->dq_sb != sb)
 			continue;
-		/* Now we have active dquot so we can just increase use count */
-		atomic_inc(&dquot->dq_count);
+		__dqgrab(dquot);
 		spin_unlock(&dq_list_lock);
 		dqput(old_dquot);
 		old_dquot = dquot;
 		/*
 		 * ->release_dquot() can be racing with us. Our reference
-		 * protects us from new calls to it so just wait for any
-		 * outstanding call and recheck the DQ_ACTIVE_B after that.
+		 * protects us from dquot_release() proceeding so just wait for
+		 * any outstanding call and recheck the DQ_ACTIVE_B after that.
 		 */
 		wait_on_dquot(dquot);
 		if (dquot_active(dquot)) {
@@ -741,7 +717,7 @@
 			/* Now we have active dquot from which someone is
 			 * holding reference so we can safely just increase
 			 * use count */
-			dqgrab(dquot);
+			__dqgrab(dquot);
 			spin_unlock(&dq_list_lock);
 			err = dquot_write_dquot(dquot);
 			if (err && !ret)
@@ -987,9 +963,7 @@
 		spin_unlock(&dq_list_lock);
 		dqstats_inc(DQST_LOOKUPS);
 	} else {
-		if (!atomic_read(&dquot->dq_count))
-			remove_free_dquot(dquot);
-		atomic_inc(&dquot->dq_count);
+		__dqgrab(dquot);
 		spin_unlock(&dq_list_lock);
 		dqstats_inc(DQST_CACHE_HITS);
 		dqstats_inc(DQST_LOOKUPS);
+1 -8
include/linux/quotaops.h
@@ -44,14 +44,7 @@
 bool dquot_initialize_needed(struct inode *inode);
 void dquot_drop(struct inode *inode);
 struct dquot *dqget(struct super_block *sb, struct kqid qid);
-static inline struct dquot *dqgrab(struct dquot *dquot)
-{
-	/* Make sure someone else has active reference to dquot */
-	WARN_ON_ONCE(!atomic_read(&dquot->dq_count));
-	WARN_ON_ONCE(!test_bit(DQ_ACTIVE_B, &dquot->dq_flags));
-	atomic_inc(&dquot->dq_count);
-	return dquot;
-}
+struct dquot *dqgrab(struct dquot *dquot);
 
 static inline bool dquot_is_busy(struct dquot *dquot)
 {