Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

RDMA/mlx5: Change check for cacheable mkeys

umem can be NULL for user application mkeys in some cases. Therefore
umem can't be used to check whether the mkey is cacheable, so the
check is changed to use a flag that explicitly indicates it. Also make
sure that all mkeys which are not returned to the cache are destroyed.

Fixes: dd1b913fb0d0 ("RDMA/mlx5: Cache all user cacheable mkeys on dereg MR flow")
Signed-off-by: Or Har-Toov <ohartoov@nvidia.com>
Link: https://lore.kernel.org/r/2690bc5c6896bcb937f89af16a1ff0343a7ab3d0.1712140377.git.leon@kernel.org
Signed-off-by: Leon Romanovsky <leon@kernel.org>

authored by

Or Har-Toov and committed by
Leon Romanovsky
8c1185fe 0611a8e8

+23 -10
+1
drivers/infiniband/hw/mlx5/mlx5_ib.h
··· 646 646 /* Cacheable user Mkey must hold either a rb_key or a cache_ent. */ 647 647 struct mlx5r_cache_rb_key rb_key; 648 648 struct mlx5_cache_ent *cache_ent; 649 + u8 cacheable : 1; 649 650 }; 650 651 651 652 #define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
+22 -10
drivers/infiniband/hw/mlx5/mr.c
··· 1158 1158 if (IS_ERR(mr)) 1159 1159 return mr; 1160 1160 mr->mmkey.rb_key = rb_key; 1161 + mr->mmkey.cacheable = true; 1161 1162 return mr; 1162 1163 } 1163 1164 ··· 1169 1168 mr->ibmr.pd = pd; 1170 1169 mr->umem = umem; 1171 1170 mr->page_shift = order_base_2(page_size); 1171 + mr->mmkey.cacheable = true; 1172 1172 set_mr_fields(dev, mr, umem->length, access_flags, iova); 1173 1173 1174 1174 return mr; ··· 1837 1835 return ret; 1838 1836 } 1839 1837 1838 + static int mlx5_revoke_mr(struct mlx5_ib_mr *mr) 1839 + { 1840 + struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); 1841 + struct mlx5_cache_ent *ent = mr->mmkey.cache_ent; 1842 + 1843 + if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr)) 1844 + return 0; 1845 + 1846 + if (ent) { 1847 + spin_lock_irq(&ent->mkeys_queue.lock); 1848 + ent->in_use--; 1849 + mr->mmkey.cache_ent = NULL; 1850 + spin_unlock_irq(&ent->mkeys_queue.lock); 1851 + } 1852 + return destroy_mkey(dev, mr); 1853 + } 1854 + 1840 1855 int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) 1841 1856 { 1842 1857 struct mlx5_ib_mr *mr = to_mmr(ibmr); ··· 1899 1880 } 1900 1881 1901 1882 /* Stop DMA */ 1902 - if (mr->umem && mlx5r_umr_can_load_pas(dev, mr->umem->length)) 1903 - if (mlx5r_umr_revoke_mr(mr) || 1904 - cache_ent_find_and_store(dev, mr)) 1905 - mr->mmkey.cache_ent = NULL; 1906 - 1907 - if (!mr->mmkey.cache_ent) { 1908 - rc = destroy_mkey(to_mdev(mr->ibmr.device), mr); 1909 - if (rc) 1910 - return rc; 1911 - } 1883 + rc = mlx5_revoke_mr(mr); 1884 + if (rc) 1885 + return rc; 1912 1886 1913 1887 if (mr->umem) { 1914 1888 bool is_odp = is_odp_mr(mr);