Skip to content

Commit a03bfc3

Browse files
yishaih authored and jgunthorpe committed
RDMA/mlx5: Sync device with CPU pages upon ODP MR registration
Sync device with CPU pages upon ODP MR registration. mlx5 already has to zero the HW's version of the PAS list, may as well deliver a PAS list that matches the current CPU page tables configuration. Link: https://lore.kernel.org/r/20200930163828.1336747-5-leon@kernel.org Signed-off-by: Yishai Hadas <yishaih@nvidia.com> Signed-off-by: Leon Romanovsky <leonro@nvidia.com> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
1 parent 677cf51 commit a03bfc3

3 files changed

Lines changed: 32 additions & 6 deletions

File tree

drivers/infiniband/hw/mlx5/mlx5_ib.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1283,6 +1283,7 @@ void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
12831283
int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
12841284
enum ib_uverbs_advise_mr_advice advice,
12851285
u32 flags, struct ib_sge *sg_list, u32 num_sge);
1286+
int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr, bool enable);
12861287
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
12871288
static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
12881289
{
@@ -1304,6 +1305,10 @@ mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
13041305
{
13051306
return -EOPNOTSUPP;
13061307
}
1308+
/*
 * Stub used when CONFIG_INFINIBAND_ON_DEMAND_PAGING is disabled (this
 * definition sits in the #else branch of that config guard): ODP MRs
 * cannot exist, so initialization is reported as unsupported.
 */
static inline int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr, bool enable)
{
	return -EOPNOTSUPP;
}
13071312
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
13081313

13091314
extern const struct mmu_interval_notifier_ops mlx5_mn_ops;

drivers/infiniband/hw/mlx5/mr.c

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1421,17 +1421,14 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
14211421
mr->umem = umem;
14221422
set_mr_fields(dev, mr, npages, length, access_flags);
14231423

1424-
if (xlt_with_umr) {
1424+
if (xlt_with_umr && !(access_flags & IB_ACCESS_ON_DEMAND)) {
14251425
/*
14261426
* If the MR was created with reg_create then it will be
14271427
* configured properly but left disabled. It is safe to go ahead
14281428
* and configure it again via UMR while enabling it.
14291429
*/
14301430
int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
14311431

1432-
if (access_flags & IB_ACCESS_ON_DEMAND)
1433-
update_xlt_flags |= MLX5_IB_UPD_XLT_ZAP;
1434-
14351432
err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
14361433
update_xlt_flags);
14371434
if (err) {
@@ -1451,6 +1448,12 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
14511448
dereg_mr(dev, mr);
14521449
return ERR_PTR(err);
14531450
}
1451+
1452+
err = mlx5_ib_init_odp_mr(mr, xlt_with_umr);
1453+
if (err) {
1454+
dereg_mr(dev, mr);
1455+
return ERR_PTR(err);
1456+
}
14541457
}
14551458

14561459
return &mr->ibmr;

drivers/infiniband/hw/mlx5/odp.c

Lines changed: 20 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -666,6 +666,7 @@ void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr)
666666

667667
#define MLX5_PF_FLAGS_DOWNGRADE BIT(1)
668668
#define MLX5_PF_FLAGS_SNAPSHOT BIT(2)
669+
#define MLX5_PF_FLAGS_ENABLE BIT(3)
669670
static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
670671
u64 user_va, size_t bcnt, u32 *bytes_mapped,
671672
u32 flags)
@@ -675,6 +676,10 @@ static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
675676
u64 access_mask;
676677
u64 start_idx;
677678
bool fault = !(flags & MLX5_PF_FLAGS_SNAPSHOT);
679+
u32 xlt_flags = MLX5_IB_UPD_XLT_ATOMIC;
680+
681+
if (flags & MLX5_PF_FLAGS_ENABLE)
682+
xlt_flags |= MLX5_IB_UPD_XLT_ENABLE;
678683

679684
page_shift = odp->page_shift;
680685
start_idx = (user_va - ib_umem_start(odp)) >> page_shift;
@@ -691,8 +696,7 @@ static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
691696
* No need to check whether the MTTs really belong to this MR, since
692697
* ib_umem_odp_map_dma_and_lock already checks this.
693698
*/
694-
ret = mlx5_ib_update_xlt(mr, start_idx, np, page_shift,
695-
MLX5_IB_UPD_XLT_ATOMIC);
699+
ret = mlx5_ib_update_xlt(mr, start_idx, np, page_shift, xlt_flags);
696700
mutex_unlock(&odp->umem_mutex);
697701

698702
if (ret < 0) {
@@ -827,6 +831,20 @@ static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
827831
flags);
828832
}
829833

834+
/*
 * Pre-populate a freshly registered ODP MR's translation table from the
 * current CPU page tables (a snapshot fault — no pages are forced in),
 * optionally enabling the MKey via UMR in the same update.
 *
 * @mr:     ODP MR covering mr->umem
 * @enable: true to also set MLX5_IB_UPD_XLT_ENABLE on the XLT update
 *
 * Returns 0 on success or a negative errno from the pagefault path;
 * a positive page count from pagefault_real_mr() is squashed to 0.
 */
int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr, bool enable)
{
	struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
	u32 pf_flags = MLX5_PF_FLAGS_SNAPSHOT;
	int npages;

	if (enable)
		pf_flags |= MLX5_PF_FLAGS_ENABLE;

	npages = pagefault_real_mr(mr, odp, mr->umem->address,
				   mr->umem->length, NULL, pf_flags);
	return npages < 0 ? npages : 0;
}
847+
830848
struct pf_frame {
831849
struct pf_frame *next;
832850
u32 key;

0 commit comments

Comments
 (0)