Skip to content

Commit bf6a476

Browse files
Hakon-Bugge authored and jgunthorpe committed
IB/mlx4: Convert rej_tmout radix-tree to XArray
Was missed during the initial review of the below patch.

Fixes: 227a0e1 ("IB/mlx4: Add support for REJ due to timeout")
Link: https://lore.kernel.org/r/1602253482-6718-1-git-send-email-haakon.bugge@oracle.com
Signed-off-by: Håkon Bugge <haakon.bugge@oracle.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
1 parent de55412 commit bf6a476

2 files changed

Lines changed: 51 additions & 49 deletions

File tree

drivers/infiniband/hw/mlx4/cm.c

Lines changed: 50 additions & 46 deletions
Original file line numberDiff line numberDiff line change
@@ -58,9 +58,7 @@ struct rej_tmout_entry {
5858
int slave;
5959
u32 rem_pv_cm_id;
6060
struct delayed_work timeout;
61-
struct radix_tree_root *rej_tmout_root;
62-
/* Points to the mutex protecting this radix-tree */
63-
struct mutex *lock;
61+
struct xarray *xa_rej_tmout;
6462
};
6563

6664
struct cm_generic_msg {
@@ -350,9 +348,7 @@ static void rej_tmout_timeout(struct work_struct *work)
350348
struct rej_tmout_entry *item = container_of(delay, struct rej_tmout_entry, timeout);
351349
struct rej_tmout_entry *deleted;
352350

353-
mutex_lock(item->lock);
354-
deleted = radix_tree_delete_item(item->rej_tmout_root, item->rem_pv_cm_id, NULL);
355-
mutex_unlock(item->lock);
351+
deleted = xa_cmpxchg(item->xa_rej_tmout, item->rem_pv_cm_id, item, NULL, 0);
356352

357353
if (deleted != item)
358354
pr_debug("deleted(%p) != item(%p)\n", deleted, item);
@@ -363,18 +359,21 @@ static void rej_tmout_timeout(struct work_struct *work)
363359
static int alloc_rej_tmout(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id, int slave)
364360
{
365361
struct rej_tmout_entry *item;
366-
int sts;
362+
struct rej_tmout_entry *old;
363+
int ret = 0;
364+
365+
xa_lock(&sriov->xa_rej_tmout);
366+
item = xa_load(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id);
367367

368-
mutex_lock(&sriov->rej_tmout_lock);
369-
item = radix_tree_lookup(&sriov->rej_tmout_root, (unsigned long)rem_pv_cm_id);
370-
mutex_unlock(&sriov->rej_tmout_lock);
371368
if (item) {
372-
if (IS_ERR(item))
373-
return PTR_ERR(item);
374-
/* If a retry, adjust delayed work */
375-
mod_delayed_work(system_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
376-
return 0;
369+
if (xa_err(item))
370+
ret = xa_err(item);
371+
else
372+
/* If a retry, adjust delayed work */
373+
mod_delayed_work(system_wq, &item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
374+
goto err_or_exists;
377375
}
376+
xa_unlock(&sriov->xa_rej_tmout);
378377

379378
item = kmalloc(sizeof(*item), GFP_KERNEL);
380379
if (!item)
@@ -383,39 +382,44 @@ static int alloc_rej_tmout(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id, int sl
383382
INIT_DELAYED_WORK(&item->timeout, rej_tmout_timeout);
384383
item->slave = slave;
385384
item->rem_pv_cm_id = rem_pv_cm_id;
386-
item->rej_tmout_root = &sriov->rej_tmout_root;
387-
item->lock = &sriov->rej_tmout_lock;
388-
389-
mutex_lock(&sriov->rej_tmout_lock);
390-
sts = radix_tree_insert(&sriov->rej_tmout_root, (unsigned long)rem_pv_cm_id, item);
391-
mutex_unlock(&sriov->rej_tmout_lock);
392-
if (sts)
393-
goto err_insert;
385+
item->xa_rej_tmout = &sriov->xa_rej_tmout;
386+
387+
old = xa_cmpxchg(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id, NULL, item, GFP_KERNEL);
388+
if (old) {
389+
pr_debug(
390+
"Non-null old entry (%p) or error (%d) when inserting\n",
391+
old, xa_err(old));
392+
kfree(item);
393+
return xa_err(old);
394+
}
394395

395396
schedule_delayed_work(&item->timeout, CM_CLEANUP_CACHE_TIMEOUT);
396397

397398
return 0;
398399

399-
err_insert:
400-
kfree(item);
401-
return sts;
400+
err_or_exists:
401+
xa_unlock(&sriov->xa_rej_tmout);
402+
return ret;
402403
}
403404

404405
static int lookup_rej_tmout_slave(struct mlx4_ib_sriov *sriov, u32 rem_pv_cm_id)
405406
{
406407
struct rej_tmout_entry *item;
408+
int slave;
407409

408-
mutex_lock(&sriov->rej_tmout_lock);
409-
item = radix_tree_lookup(&sriov->rej_tmout_root, (unsigned long)rem_pv_cm_id);
410-
mutex_unlock(&sriov->rej_tmout_lock);
410+
xa_lock(&sriov->xa_rej_tmout);
411+
item = xa_load(&sriov->xa_rej_tmout, (unsigned long)rem_pv_cm_id);
411412

412-
if (!item || IS_ERR(item)) {
413+
if (!item || xa_err(item)) {
413414
pr_debug("Could not find slave. rem_pv_cm_id 0x%x error: %d\n",
414-
rem_pv_cm_id, (int)PTR_ERR(item));
415-
return !item ? -ENOENT : PTR_ERR(item);
415+
rem_pv_cm_id, xa_err(item));
416+
slave = !item ? -ENOENT : xa_err(item);
417+
} else {
418+
slave = item->slave;
416419
}
420+
xa_unlock(&sriov->xa_rej_tmout);
417421

418-
return item->slave;
422+
return slave;
419423
}
420424

421425
int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
@@ -483,34 +487,34 @@ void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
483487
INIT_LIST_HEAD(&dev->sriov.cm_list);
484488
dev->sriov.sl_id_map = RB_ROOT;
485489
xa_init_flags(&dev->sriov.pv_id_table, XA_FLAGS_ALLOC);
486-
mutex_init(&dev->sriov.rej_tmout_lock);
487-
INIT_RADIX_TREE(&dev->sriov.rej_tmout_root, GFP_KERNEL);
490+
xa_init(&dev->sriov.xa_rej_tmout);
488491
}
489492

490-
static void rej_tmout_tree_cleanup(struct mlx4_ib_sriov *sriov, int slave)
493+
static void rej_tmout_xa_cleanup(struct mlx4_ib_sriov *sriov, int slave)
491494
{
492-
struct radix_tree_iter iter;
495+
struct rej_tmout_entry *item;
493496
bool flush_needed = false;
494-
__rcu void **slot;
497+
unsigned long id;
495498
int cnt = 0;
496499

497-
mutex_lock(&sriov->rej_tmout_lock);
498-
radix_tree_for_each_slot(slot, &sriov->rej_tmout_root, &iter, 0) {
499-
struct rej_tmout_entry *item = *slot;
500-
500+
xa_lock(&sriov->xa_rej_tmout);
501+
xa_for_each(&sriov->xa_rej_tmout, id, item) {
501502
if (slave < 0 || slave == item->slave) {
502503
mod_delayed_work(system_wq, &item->timeout, 0);
503504
flush_needed = true;
504505
++cnt;
505506
}
506507
}
507-
mutex_unlock(&sriov->rej_tmout_lock);
508+
xa_unlock(&sriov->xa_rej_tmout);
508509

509510
if (flush_needed) {
510511
flush_scheduled_work();
511-
pr_debug("Deleted %d entries in radix_tree for slave %d during cleanup\n",
512-
slave, cnt);
512+
pr_debug("Deleted %d entries in xarray for slave %d during cleanup\n",
513+
cnt, slave);
513514
}
515+
516+
if (slave < 0)
517+
WARN_ON(!xa_empty(&sriov->xa_rej_tmout));
514518
}
515519

516520
/* slave = -1 ==> all slaves */
@@ -581,5 +585,5 @@ void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
581585
kfree(map);
582586
}
583587

584-
rej_tmout_tree_cleanup(sriov, slave);
588+
rej_tmout_xa_cleanup(sriov, slave);
585589
}

drivers/infiniband/hw/mlx4/mlx4_ib.h

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -522,9 +522,7 @@ struct mlx4_ib_sriov {
522522
spinlock_t id_map_lock;
523523
struct rb_root sl_id_map;
524524
struct list_head cm_list;
525-
/* Protects the radix-tree */
526-
struct mutex rej_tmout_lock;
527-
struct radix_tree_root rej_tmout_root;
525+
struct xarray xa_rej_tmout;
528526
};
529527

530528
struct gid_cache_context {

0 commit comments

Comments (0)