Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'ceph-for-5.2-rc4' of git://github.com/ceph/ceph-client

Pull ceph fixes from Ilya Dryomov:
"A change to call iput() asynchronously to avoid a possible deadlock
when iput_final() needs to wait for in-flight I/O (e.g. readahead) and
a fixup for a cleanup that went into -rc1"

* tag 'ceph-for-5.2-rc4' of git://github.com/ceph/ceph-client:
ceph: fix error handling in ceph_get_caps()
ceph: avoid iput_final() while holding mutex or in dispatch thread
ceph: single workqueue for inode related works
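
The centerpiece of the series is the new ceph_async_iput() helper added in fs/ceph/inode.c (full diff below): instead of dropping the last inode reference in an MDS/OSD dispatch thread or while holding a mutex, the final put is handed off to the per-fs "ceph-inode" workqueue, so a blocking iput_final() can never deadlock those contexts. The user-space sketch below illustrates the same "defer the final put to a worker" pattern in ordinary C. It is only an analogy: all names in it (obj, obj_async_put, put_unless_last, release_worker) are made up for illustration and are not kernel or ceph APIs.

/*
 * Minimal user-space sketch of the deferred-final-put pattern used by
 * ceph_async_iput(): if we are not dropping the last reference, just
 * decrement; if we might be dropping the last one, hand the object to a
 * worker thread so the (potentially blocking) teardown never runs here.
 * Illustrative only; error handling is omitted for brevity.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refcount;
};

/* Pretend this can block (like iput_final() waiting for readahead I/O). */
static void obj_release(struct obj *o)
{
	printf("releasing %p in worker thread\n", (void *)o);
	free(o);
}

static void *release_worker(void *arg)
{
	struct obj *o = arg;

	/* Drop the reference we were handed; tear down if it was the last. */
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		obj_release(o);
	return NULL;
}

/*
 * Rough equivalent of atomic_add_unless(&i_count, -1, 1): decrement only
 * if the current value is not 1, i.e. only if this is NOT the last ref.
 * Returns 1 if a non-final reference was dropped, 0 if we hold the last.
 */
static int put_unless_last(struct obj *o)
{
	int c = atomic_load(&o->refcount);

	while (c != 1) {
		if (atomic_compare_exchange_weak(&o->refcount, &c, c - 1))
			return 1;
	}
	return 0;
}

static void obj_async_put(struct obj *o)
{
	pthread_t worker;

	if (put_unless_last(o))
		return;		/* fast path: nothing to defer */

	/* Last reference: punt the blocking release to another thread. */
	pthread_create(&worker, NULL, release_worker, o);
	pthread_detach(worker);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	atomic_init(&o->refcount, 2);
	obj_async_put(o);	/* non-final put: just decrements */
	obj_async_put(o);	/* final put: deferred to the worker */
	pthread_exit(NULL);	/* let the detached worker finish */
}

The kernel version loops retrying queue_work() rather than spawning a thread, because a failed queue_work() means the inode's work item is already pending and therefore still holds a reference.
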

8 files changed: +156 -135
+19 -15
fs/ceph/caps.c
···
 		_got = 0;
 		ret = try_get_cap_refs(ci, need, want, endoff,
 				       false, &_got);
-		if (ret == -EAGAIN) {
+		if (ret == -EAGAIN)
 			continue;
-		} else if (!ret) {
-			int err;
-
+		if (!ret) {
 			DEFINE_WAIT_FUNC(wait, woken_wake_function);
 			add_wait_queue(&ci->i_cap_wq, &wait);
 
-			while (!(err = try_get_cap_refs(ci, need, want, endoff,
+			while (!(ret = try_get_cap_refs(ci, need, want, endoff,
 							true, &_got))) {
 				if (signal_pending(current)) {
 					ret = -ERESTARTSYS;
···
 			}
 
 			remove_wait_queue(&ci->i_cap_wq, &wait);
-			if (err == -EAGAIN)
+			if (ret == -EAGAIN)
 				continue;
 		}
-		if (ret == -ESTALE) {
-			/* session was killed, try renew caps */
-			ret = ceph_renew_caps(&ci->vfs_inode);
-			if (ret == 0)
-				continue;
+		if (ret < 0) {
+			if (ret == -ESTALE) {
+				/* session was killed, try renew caps */
+				ret = ceph_renew_caps(&ci->vfs_inode);
+				if (ret == 0)
+					continue;
+			}
 			return ret;
 		}
···
 	}
 	if (complete_capsnap)
 		wake_up_all(&ci->i_cap_wq);
-	while (put-- > 0)
-		iput(inode);
+	while (put-- > 0) {
+		/* avoid calling iput_final() in osd dispatch threads */
+		ceph_async_iput(inode);
+	}
 }
 
 /*
···
 done:
 	mutex_unlock(&session->s_mutex);
 done_unlocked:
-	iput(inode);
 	ceph_put_string(extra_info.pool_ns);
+	/* avoid calling iput_final() in mds dispatch threads */
+	ceph_async_iput(inode);
 	return;
 
 flush_cap_releases:
···
 		if (inode) {
 			dout("check_delayed_caps on %p\n", inode);
 			ceph_check_caps(ci, flags, NULL);
-			iput(inode);
+			/* avoid calling iput_final() in tick thread */
+			ceph_async_iput(inode);
 		}
 	}
 	spin_unlock(&mdsc->cap_delay_lock);
+1 -1
fs/ceph/file.c
···
 		if (aio_work) {
 			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
 			aio_work->req = req;
-			queue_work(ceph_inode_to_client(inode)->wb_wq,
+			queue_work(ceph_inode_to_client(inode)->inode_wq,
 				   &aio_work->work);
 			return;
 		}
+83 -72
fs/ceph/inode.c
···
 
 static const struct inode_operations ceph_symlink_iops;
 
-static void ceph_invalidate_work(struct work_struct *work);
-static void ceph_writeback_work(struct work_struct *work);
-static void ceph_vmtruncate_work(struct work_struct *work);
+static void ceph_inode_work(struct work_struct *work);
 
 /*
  * find or create an inode, given the ceph ino number
···
 	INIT_LIST_HEAD(&ci->i_snap_realm_item);
 	INIT_LIST_HEAD(&ci->i_snap_flush_item);
 
-	INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
-	INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);
-
-	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);
+	INIT_WORK(&ci->i_work, ceph_inode_work);
+	ci->i_work_mask = 0;
 
 	ceph_fscache_inode_init(ci);
 
···
 			pr_err("fill_inode badness on %p got %d\n", in, rc);
 			err = rc;
 		}
-		iput(in);
+		/* avoid calling iput_final() in mds dispatch threads */
+		ceph_async_iput(in);
 	}
 
 	return err;
···
 					 &req->r_caps_reservation);
 			if (ret < 0) {
 				pr_err("fill_inode badness on %p\n", in);
-				if (d_really_is_negative(dn))
-					iput(in);
+				if (d_really_is_negative(dn)) {
+					/* avoid calling iput_final() in mds
+					 * dispatch threads */
+					ceph_async_iput(in);
+				}
 				d_drop(dn);
 				err = ret;
 				goto next_item;
···
 			if (ceph_security_xattr_deadlock(in)) {
 				dout(" skip splicing dn %p to inode %p"
 				     " (security xattr deadlock)\n", dn, in);
-				iput(in);
+				ceph_async_iput(in);
 				skipped++;
 				goto next_item;
 			}
···
 }
 
 /*
+ * Put reference to inode, but avoid calling iput_final() in current thread.
+ * iput_final() may wait for reahahead pages. The wait can cause deadlock in
+ * some contexts.
+ */
+void ceph_async_iput(struct inode *inode)
+{
+	if (!inode)
+		return;
+	for (;;) {
+		if (atomic_add_unless(&inode->i_count, -1, 1))
+			break;
+		if (queue_work(ceph_inode_to_client(inode)->inode_wq,
+			       &ceph_inode(inode)->i_work))
+			break;
+		/* queue work failed, i_count must be at least 2 */
+	}
+}
+
+/*
  * Write back inode data in a worker thread. (This can't be done
  * in the message handler context.)
  */
 void ceph_queue_writeback(struct inode *inode)
 {
+	struct ceph_inode_info *ci = ceph_inode(inode);
+	set_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask);
+
 	ihold(inode);
-	if (queue_work(ceph_inode_to_client(inode)->wb_wq,
-		       &ceph_inode(inode)->i_wb_work)) {
+	if (queue_work(ceph_inode_to_client(inode)->inode_wq,
+		       &ci->i_work)) {
 		dout("ceph_queue_writeback %p\n", inode);
 	} else {
-		dout("ceph_queue_writeback %p failed\n", inode);
+		dout("ceph_queue_writeback %p already queued, mask=%lx\n",
+		     inode, ci->i_work_mask);
 		iput(inode);
 	}
-}
-
-static void ceph_writeback_work(struct work_struct *work)
-{
-	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
-						  i_wb_work);
-	struct inode *inode = &ci->vfs_inode;
-
-	dout("writeback %p\n", inode);
-	filemap_fdatawrite(&inode->i_data);
-	iput(inode);
 }
 
 /*
···
  */
 void ceph_queue_invalidate(struct inode *inode)
 {
+	struct ceph_inode_info *ci = ceph_inode(inode);
+	set_bit(CEPH_I_WORK_INVALIDATE_PAGES, &ci->i_work_mask);
+
 	ihold(inode);
-	if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
-		       &ceph_inode(inode)->i_pg_inv_work)) {
+	if (queue_work(ceph_inode_to_client(inode)->inode_wq,
+		       &ci->i_work)) {
 		dout("ceph_queue_invalidate %p\n", inode);
 	} else {
-		dout("ceph_queue_invalidate %p failed\n", inode);
+		dout("ceph_queue_invalidate %p already queued, mask=%lx\n",
+		     inode, ci->i_work_mask);
 		iput(inode);
 	}
 }
 
 /*
- * Invalidate inode pages in a worker thread. (This can't be done
- * in the message handler context.)
+ * Queue an async vmtruncate. If we fail to queue work, we will handle
+ * the truncation the next time we call __ceph_do_pending_vmtruncate.
  */
-static void ceph_invalidate_work(struct work_struct *work)
+void ceph_queue_vmtruncate(struct inode *inode)
 {
-	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
-						  i_pg_inv_work);
-	struct inode *inode = &ci->vfs_inode;
+	struct ceph_inode_info *ci = ceph_inode(inode);
+	set_bit(CEPH_I_WORK_VMTRUNCATE, &ci->i_work_mask);
+
+	ihold(inode);
+	if (queue_work(ceph_inode_to_client(inode)->inode_wq,
+		       &ci->i_work)) {
+		dout("ceph_queue_vmtruncate %p\n", inode);
+	} else {
+		dout("ceph_queue_vmtruncate %p already queued, mask=%lx\n",
+		     inode, ci->i_work_mask);
+		iput(inode);
+	}
+}
+
+static void ceph_do_invalidate_pages(struct inode *inode)
+{
+	struct ceph_inode_info *ci = ceph_inode(inode);
 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
 	u32 orig_gen;
 	int check = 0;
···
 out:
 	if (check)
 		ceph_check_caps(ci, 0, NULL);
-	iput(inode);
-}
-
-
-/*
- * called by trunc_wq;
- *
- * We also truncate in a separate thread as well.
- */
-static void ceph_vmtruncate_work(struct work_struct *work)
-{
-	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
-						  i_vmtruncate_work);
-	struct inode *inode = &ci->vfs_inode;
-
-	dout("vmtruncate_work %p\n", inode);
-	__ceph_do_pending_vmtruncate(inode);
-	iput(inode);
-}
-
-/*
- * Queue an async vmtruncate. If we fail to queue work, we will handle
- * the truncation the next time we call __ceph_do_pending_vmtruncate.
- */
-void ceph_queue_vmtruncate(struct inode *inode)
-{
-	struct ceph_inode_info *ci = ceph_inode(inode);
-
-	ihold(inode);
-
-	if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
-		       &ci->i_vmtruncate_work)) {
-		dout("ceph_queue_vmtruncate %p\n", inode);
-	} else {
-		dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
-		     inode, ci->i_truncate_pending);
-		iput(inode);
-	}
 }
 
 /*
···
 		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
 
 	wake_up_all(&ci->i_cap_wq);
+}
+
+static void ceph_inode_work(struct work_struct *work)
+{
+	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
+						  i_work);
+	struct inode *inode = &ci->vfs_inode;
+
+	if (test_and_clear_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask)) {
+		dout("writeback %p\n", inode);
+		filemap_fdatawrite(&inode->i_data);
+	}
+	if (test_and_clear_bit(CEPH_I_WORK_INVALIDATE_PAGES, &ci->i_work_mask))
+		ceph_do_invalidate_pages(inode);
+
+	if (test_and_clear_bit(CEPH_I_WORK_VMTRUNCATE, &ci->i_work_mask))
+		__ceph_do_pending_vmtruncate(inode);
+
+	iput(inode);
 }
 
 /*
+18 -10
fs/ceph/mds_client.c
···
 	ceph_msg_put(req->r_reply);
 	if (req->r_inode) {
 		ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
-		iput(req->r_inode);
+		/* avoid calling iput_final() in mds dispatch threads */
+		ceph_async_iput(req->r_inode);
 	}
 	if (req->r_parent)
 		ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
-	iput(req->r_target_inode);
+	ceph_async_iput(req->r_target_inode);
 	if (req->r_dentry)
 		dput(req->r_dentry);
 	if (req->r_old_dentry)
···
 		 */
 		ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
 				  CEPH_CAP_PIN);
-		iput(req->r_old_dentry_dir);
+		ceph_async_iput(req->r_old_dentry_dir);
 	}
 	kfree(req->r_path1);
 	kfree(req->r_path2);
···
 	}
 
 	if (req->r_unsafe_dir) {
-		iput(req->r_unsafe_dir);
+		/* avoid calling iput_final() in mds dispatch threads */
+		ceph_async_iput(req->r_unsafe_dir);
 		req->r_unsafe_dir = NULL;
 	}
 
···
 			cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
 		if (!cap) {
 			spin_unlock(&ci->i_ceph_lock);
-			iput(inode);
+			ceph_async_iput(inode);
 			goto random;
 		}
 		mds = cap->session->s_mds;
···
 		     cap == ci->i_auth_cap ? "auth " : "", cap);
 		spin_unlock(&ci->i_ceph_lock);
 out:
-	iput(inode);
+	/* avoid calling iput_final() while holding mdsc->mutex or
+	 * in mds dispatch threads */
+	ceph_async_iput(inode);
 	return mds;
 
 random:
···
 		spin_unlock(&session->s_cap_lock);
 
 		if (last_inode) {
-			iput(last_inode);
+			/* avoid calling iput_final() while holding
+			 * s_mutex or in mds dispatch threads */
+			ceph_async_iput(last_inode);
 			last_inode = NULL;
 		}
 		if (old_cap) {
···
 	session->s_cap_iterator = NULL;
 	spin_unlock(&session->s_cap_lock);
 
-	iput(last_inode);
+	ceph_async_iput(last_inode);
 	if (old_cap)
 		ceph_put_cap(session->s_mdsc, old_cap);
 
···
 			spin_unlock(&session->s_cap_lock);
 
 			inode = ceph_find_inode(sb, vino);
-			iput(inode);
+			/* avoid calling iput_final() while holding s_mutex */
+			ceph_async_iput(inode);
 
 			spin_lock(&session->s_cap_lock);
 		}
···
 	ceph_con_send(&session->s_con, msg);
 
 out:
-	iput(inode);
 	mutex_unlock(&session->s_mutex);
+	/* avoid calling iput_final() in mds dispatch threads */
+	ceph_async_iput(inode);
 	return;
 
 bad:
+6 -3
fs/ceph/quota.c
···
 	     le64_to_cpu(h->max_files));
 	spin_unlock(&ci->i_ceph_lock);
 
-	iput(inode);
+	/* avoid calling iput_final() in dispatch thread */
+	ceph_async_iput(inode);
 }
 
 static struct ceph_quotarealm_inode *
···
 
 		ci = ceph_inode(in);
 		has_quota = __ceph_has_any_quota(ci);
-		iput(in);
+		/* avoid calling iput_final() while holding mdsc->snap_rwsem */
+		ceph_async_iput(in);
 
 		next = realm->parent;
 		if (has_quota || !next)
···
 			pr_warn("Invalid quota check op (%d)\n", op);
 			exceeded = true; /* Just break the loop */
 		}
-		iput(in);
+		/* avoid calling iput_final() while holding mdsc->snap_rwsem */
+		ceph_async_iput(in);
 
 		next = realm->parent;
 		if (exceeded || !next)
+11 -5
fs/ceph/snap.c
···
 		if (!inode)
 			continue;
 		spin_unlock(&realm->inodes_with_caps_lock);
-		iput(lastinode);
+		/* avoid calling iput_final() while holding
+		 * mdsc->snap_rwsem or in mds dispatch threads */
+		ceph_async_iput(lastinode);
 		lastinode = inode;
 		ceph_queue_cap_snap(ci);
 		spin_lock(&realm->inodes_with_caps_lock);
 	}
 	spin_unlock(&realm->inodes_with_caps_lock);
-	iput(lastinode);
+	ceph_async_iput(lastinode);
 
 	dout("queue_realm_cap_snaps %p %llx done\n", realm, realm->ino);
 }
···
 		ihold(inode);
 		spin_unlock(&mdsc->snap_flush_lock);
 		ceph_flush_snaps(ci, &session);
-		iput(inode);
+		/* avoid calling iput_final() while holding
+		 * session->s_mutex or in mds dispatch threads */
+		ceph_async_iput(inode);
 		spin_lock(&mdsc->snap_flush_lock);
 	}
 	spin_unlock(&mdsc->snap_flush_lock);
···
 		ceph_get_snap_realm(mdsc, realm);
 		ceph_put_snap_realm(mdsc, oldrealm);
 
-		iput(inode);
+		/* avoid calling iput_final() while holding
+		 * mdsc->snap_rwsem or mds in dispatch threads */
+		ceph_async_iput(inode);
 		continue;
 
 skip_inode:
 		spin_unlock(&ci->i_ceph_lock);
-		iput(inode);
+		ceph_async_iput(inode);
 	}
 
 	/* we may have taken some of the old realm's children. */
+7 -21
fs/ceph/super.c
···
 	 * The number of concurrent works can be high but they don't need
 	 * to be processed in parallel, limit concurrency.
 	 */
-	fsc->wb_wq = alloc_workqueue("ceph-writeback", 0, 1);
-	if (!fsc->wb_wq)
+	fsc->inode_wq = alloc_workqueue("ceph-inode", WQ_UNBOUND, 0);
+	if (!fsc->inode_wq)
 		goto fail_client;
-	fsc->pg_inv_wq = alloc_workqueue("ceph-pg-invalid", 0, 1);
-	if (!fsc->pg_inv_wq)
-		goto fail_wb_wq;
-	fsc->trunc_wq = alloc_workqueue("ceph-trunc", 0, 1);
-	if (!fsc->trunc_wq)
-		goto fail_pg_inv_wq;
 	fsc->cap_wq = alloc_workqueue("ceph-cap", 0, 1);
 	if (!fsc->cap_wq)
-		goto fail_trunc_wq;
+		goto fail_inode_wq;
 
 	/* set up mempools */
 	err = -ENOMEM;
···
 
 fail_cap_wq:
 	destroy_workqueue(fsc->cap_wq);
-fail_trunc_wq:
-	destroy_workqueue(fsc->trunc_wq);
-fail_pg_inv_wq:
-	destroy_workqueue(fsc->pg_inv_wq);
-fail_wb_wq:
-	destroy_workqueue(fsc->wb_wq);
+fail_inode_wq:
+	destroy_workqueue(fsc->inode_wq);
 fail_client:
 	ceph_destroy_client(fsc->client);
 fail:
···
 
 static void flush_fs_workqueues(struct ceph_fs_client *fsc)
 {
-	flush_workqueue(fsc->wb_wq);
-	flush_workqueue(fsc->pg_inv_wq);
-	flush_workqueue(fsc->trunc_wq);
+	flush_workqueue(fsc->inode_wq);
 	flush_workqueue(fsc->cap_wq);
 }
 
···
 {
 	dout("destroy_fs_client %p\n", fsc);
 
-	destroy_workqueue(fsc->wb_wq);
-	destroy_workqueue(fsc->pg_inv_wq);
-	destroy_workqueue(fsc->trunc_wq);
+	destroy_workqueue(fsc->inode_wq);
 	destroy_workqueue(fsc->cap_wq);
 
 	mempool_destroy(fsc->wb_pagevec_pool);
+11 -8
fs/ceph/super.h
···
 	mempool_t *wb_pagevec_pool;
 	atomic_long_t writeback_count;
 
-	struct workqueue_struct *wb_wq;
-	struct workqueue_struct *pg_inv_wq;
-	struct workqueue_struct *trunc_wq;
+	struct workqueue_struct *inode_wq;
 	struct workqueue_struct *cap_wq;
 
 #ifdef CONFIG_DEBUG_FS
···
 	struct list_head i_snap_realm_item;
 	struct list_head i_snap_flush_item;
 
-	struct work_struct i_wb_work;  /* writeback work */
-	struct work_struct i_pg_inv_work;  /* page invalidation work */
-
-	struct work_struct i_vmtruncate_work;
+	struct work_struct i_work;
+	unsigned long i_work_mask;
 
 #ifdef CONFIG_CEPH_FSCACHE
 	struct fscache_cookie *fscache;
···
 #define CEPH_I_ERROR_WRITE	(1 << 11) /* have seen write errors */
 #define CEPH_I_ERROR_FILELOCK	(1 << 12) /* have seen file lock errors */
 
+
+/*
+ * Masks of ceph inode work.
+ */
+#define CEPH_I_WORK_WRITEBACK		0 /* writeback */
+#define CEPH_I_WORK_INVALIDATE_PAGES	1 /* invalidate pages */
+#define CEPH_I_WORK_VMTRUNCATE		2 /* vmtruncate */
 
 /*
  * We set the ERROR_WRITE bit when we start seeing write errors on an inode
···
 extern bool ceph_inode_set_size(struct inode *inode, loff_t size);
 extern void __ceph_do_pending_vmtruncate(struct inode *inode);
 extern void ceph_queue_vmtruncate(struct inode *inode);
-
 extern void ceph_queue_invalidate(struct inode *inode);
 extern void ceph_queue_writeback(struct inode *inode);
+extern void ceph_async_iput(struct inode *inode);
 
 extern int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
 			     int mask, bool force);