Skip to content

Commit eefdf30

Browse files
committed
io_uring: fix IOPOLL -EAGAIN retries
This normally isn't hit, as polling is mostly done on NVMe with deep queue depths. But if we do run into request starvation, we need to ensure that retries are properly serialized.

Reported-by: Andres Freund <andres@anarazel.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent 56450c2 commit eefdf30

1 file changed

Lines changed: 9 additions & 5 deletions

File tree

fs/io_uring.c

Lines changed: 9 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -1150,7 +1150,7 @@ static void io_prep_async_work(struct io_kiocb *req)
11501150
io_req_init_async(req);
11511151

11521152
if (req->flags & REQ_F_ISREG) {
1153-
if (def->hash_reg_file)
1153+
if (def->hash_reg_file || (req->ctx->flags & IORING_SETUP_IOPOLL))
11541154
io_wq_hash_work(&req->work, file_inode(req->file));
11551155
} else {
11561156
if (def->unbound_nonreg_file)
@@ -3132,6 +3132,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
31323132
ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
31333133
if (ret < 0)
31343134
return ret;
3135+
iov_count = iov_iter_count(iter);
31353136
io_size = ret;
31363137
req->result = io_size;
31373138
ret = 0;
@@ -3144,7 +3145,6 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
31443145
if (force_nonblock && !io_file_supports_async(req->file, READ))
31453146
goto copy_iov;
31463147

3147-
iov_count = iov_iter_count(iter);
31483148
ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), iov_count);
31493149
if (unlikely(ret))
31503150
goto out_free;
@@ -3157,7 +3157,8 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
31573157
ret = 0;
31583158
goto out_free;
31593159
} else if (ret == -EAGAIN) {
3160-
if (!force_nonblock)
3160+
/* IOPOLL retry should happen for io-wq threads */
3161+
if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
31613162
goto done;
31623163
/* some cases will consume bytes even on error returns */
31633164
iov_iter_revert(iter, iov_count - iov_iter_count(iter));
@@ -3251,6 +3252,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
32513252
ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
32523253
if (ret < 0)
32533254
return ret;
3255+
iov_count = iov_iter_count(iter);
32543256
io_size = ret;
32553257
req->result = io_size;
32563258

@@ -3267,7 +3269,6 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
32673269
(req->flags & REQ_F_ISREG))
32683270
goto copy_iov;
32693271

3270-
iov_count = iov_iter_count(iter);
32713272
ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), iov_count);
32723273
if (unlikely(ret))
32733274
goto out_free;
@@ -3301,11 +3302,14 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
33013302
if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
33023303
ret2 = -EAGAIN;
33033304
if (!force_nonblock || ret2 != -EAGAIN) {
3305+
/* IOPOLL retry should happen for io-wq threads */
3306+
if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
3307+
goto copy_iov;
33043308
kiocb_done(kiocb, ret2, cs);
33053309
} else {
3310+
copy_iov:
33063311
/* some cases will consume bytes even on error returns */
33073312
iov_iter_revert(iter, iov_count - iov_iter_count(iter));
3308-
copy_iov:
33093313
ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
33103314
if (!ret)
33113315
return -EAGAIN;

0 commit comments

Comments
 (0)