Skip to content

Commit e297822

Browse files
isilence authored and axboe committed
io_uring: order refnode recycling
Don't recycle a refnode until we're done with all requests of nodes ejected before.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Cc: stable@vger.kernel.org # v5.7+
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 1e5d770 commit e297822

1 file changed

Lines changed: 23 additions & 10 deletions

File tree

fs/io_uring.c

Lines changed: 23 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -205,6 +205,7 @@ struct fixed_file_ref_node {
205205
struct list_head file_list;
206206
struct fixed_file_data *file_data;
207207
struct llist_node llist;
208+
bool done;
208209
};
209210

210211
struct fixed_file_data {
@@ -7323,10 +7324,6 @@ static void __io_file_put_work(struct fixed_file_ref_node *ref_node)
73237324
kfree(pfile);
73247325
}
73257326

7326-
spin_lock(&file_data->lock);
7327-
list_del(&ref_node->node);
7328-
spin_unlock(&file_data->lock);
7329-
73307327
percpu_ref_exit(&ref_node->refs);
73317328
kfree(ref_node);
73327329
percpu_ref_put(&file_data->refs);
@@ -7353,17 +7350,32 @@ static void io_file_put_work(struct work_struct *work)
73537350
static void io_file_data_ref_zero(struct percpu_ref *ref)
73547351
{
73557352
struct fixed_file_ref_node *ref_node;
7353+
struct fixed_file_data *data;
73567354
struct io_ring_ctx *ctx;
7357-
bool first_add;
7355+
bool first_add = false;
73587356
int delay = HZ;
73597357

73607358
ref_node = container_of(ref, struct fixed_file_ref_node, refs);
7361-
ctx = ref_node->file_data->ctx;
7359+
data = ref_node->file_data;
7360+
ctx = data->ctx;
7361+
7362+
spin_lock(&data->lock);
7363+
ref_node->done = true;
7364+
7365+
while (!list_empty(&data->ref_list)) {
7366+
ref_node = list_first_entry(&data->ref_list,
7367+
struct fixed_file_ref_node, node);
7368+
/* recycle ref nodes in order */
7369+
if (!ref_node->done)
7370+
break;
7371+
list_del(&ref_node->node);
7372+
first_add |= llist_add(&ref_node->llist, &ctx->file_put_llist);
7373+
}
7374+
spin_unlock(&data->lock);
73627375

7363-
if (percpu_ref_is_dying(&ctx->file_data->refs))
7376+
if (percpu_ref_is_dying(&data->refs))
73647377
delay = 0;
73657378

7366-
first_add = llist_add(&ref_node->llist, &ctx->file_put_llist);
73677379
if (!delay)
73687380
mod_delayed_work(system_wq, &ctx->file_put_work, 0);
73697381
else if (first_add)
@@ -7387,6 +7399,7 @@ static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
73877399
INIT_LIST_HEAD(&ref_node->node);
73887400
INIT_LIST_HEAD(&ref_node->file_list);
73897401
ref_node->file_data = ctx->file_data;
7402+
ref_node->done = false;
73907403
return ref_node;
73917404
}
73927405

@@ -7482,7 +7495,7 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
74827495

74837496
file_data->node = ref_node;
74847497
spin_lock(&file_data->lock);
7485-
list_add(&ref_node->node, &file_data->ref_list);
7498+
list_add_tail(&ref_node->node, &file_data->ref_list);
74867499
spin_unlock(&file_data->lock);
74877500
percpu_ref_get(&file_data->refs);
74887501
return ret;
@@ -7641,7 +7654,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
76417654
if (needs_switch) {
76427655
percpu_ref_kill(&data->node->refs);
76437656
spin_lock(&data->lock);
7644-
list_add(&ref_node->node, &data->ref_list);
7657+
list_add_tail(&ref_node->node, &data->ref_list);
76457658
data->node = ref_node;
76467659
spin_unlock(&data->lock);
76477660
percpu_ref_get(&ctx->file_data->refs);

0 commit comments

Comments
 (0)